  1. /*
  2. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * vineetg: Jan 2011
  9. * -sched_clock( ) no longer jiffies based. Uses the same clocksource
  10. * as gtod
  11. *
  12. * Rajeshwarr/Vineetg: Mar 2008
  13. * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
  14. * for arch independent gettimeofday()
  15. * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
  16. *
  17. * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
  18. */
  19. /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
  20. * Each can be programmed to go from @count to @limit and optionally
  21. * interrupt when that happens.
  22. * A write to Control Register clears the Interrupt
  23. *
  24. * We've designated TIMER0 for events (clockevents)
  25. * while TIMER1 for free running (clocksource)
  26. *
  27. * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
  28. * which however is currently broken
  29. */
  30. #include <linux/interrupt.h>
  31. #include <linux/clk.h>
  32. #include <linux/clk-provider.h>
  33. #include <linux/clocksource.h>
  34. #include <linux/clockchips.h>
  35. #include <linux/cpu.h>
  36. #include <linux/of.h>
  37. #include <linux/of_irq.h>
  38. #include <asm/irq.h>
  39. #include <asm/arcregs.h>
  40. #include <asm/mcip.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */

#define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */

#define ARC_TIMER_MAX		0xFFFFFFFF

/* Timer input clock rate in Hz, cached by arc_get_timer_clk() */
static unsigned long arc_timer_freq;
  52. static int noinline arc_get_timer_clk(struct device_node *node)
  53. {
  54. struct clk *clk;
  55. int ret;
  56. clk = of_clk_get(node, 0);
  57. if (IS_ERR(clk)) {
  58. pr_err("timer missing clk");
  59. return PTR_ERR(clk);
  60. }
  61. ret = clk_prepare_enable(clk);
  62. if (ret) {
  63. pr_err("Couldn't enable parent clk\n");
  64. return ret;
  65. }
  66. arc_timer_freq = clk_get_rate(clk);
  67. return 0;
  68. }
/********** Clock Source Device *********/

#ifdef CONFIG_ARC_HAS_GFRC

/*
 * clocksource ->read() hook for the 64-bit Global Free Running Counter
 * (GFRC) in the ARConnect/MCIP block.
 *
 * The counter is fetched 32 bits at a time via two MCIP command/readback
 * pairs; IRQs are disabled across the pair so an interrupt cannot split
 * the LO/HI reads on this CPU.
 */
static cycle_t arc_read_gfrc(struct clocksource *cs)
{
	unsigned long flags;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 h, l; };	/* big endian: high word stored first */
#else
		struct { u32 l, h; };
#endif
		cycle_t full;		/* combined 64-bit timestamp */
	} stamp;

	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return stamp.full;
}
/* 64-bit GFRC: best rated (400) of the three clocksources in this file */
static struct clocksource arc_counter_gfrc = {
	.name   = "ARConnect GFRC",
	.rating = 400,
	.read   = arc_read_gfrc,
	.mask   = CLOCKSOURCE_MASK(64),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
  97. static int __init arc_cs_setup_gfrc(struct device_node *node)
  98. {
  99. int exists = cpuinfo_arc700[0].extn.gfrc;
  100. int ret;
  101. if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
  102. return -ENXIO;
  103. ret = arc_get_timer_clk(node);
  104. if (ret)
  105. return ret;
  106. return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
  107. }
  108. CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
  109. #endif
#ifdef CONFIG_ARC_HAS_RTC

/* ARCv2 per-core 64-bit RTC aux registers */
#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105

/*
 * clocksource ->read() hook for the core-local 64-bit RTC.
 *
 * The two 32-bit halves cannot be read atomically, so the asm loop
 * re-reads LOW then HIGH until AUX_RTC_CTRL bit 31 is set (bbit0.nt
 * loops back while the bit is 0).
 * NOTE(review): bit-31 "snapshot valid" semantics inferred from the
 * retry loop -- confirm against the ARCv2 PRM.
 */
static cycle_t arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t full;	/* combined 64-bit timestamp */
	} stamp;

	__asm__ __volatile(
	"1: \n"
	" lr %0, [AUX_RTC_LOW] \n"
	" lr %1, [AUX_RTC_HIGH] \n"
	" lr %2, [AUX_RTC_CTRL] \n"
	" bbit0.nt %2, 31, 1b \n"
	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));

	return stamp.full;
}
/* ARCv2 RTC: rated below GFRC (350 < 400) but above TIMER1 (300) */
static struct clocksource arc_counter_rtc = {
	.name   = "ARCv2 RTC",
	.rating = 350,
	.read   = arc_read_rtc,
	.mask   = CLOCKSOURCE_MASK(64),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
  141. static int __init arc_cs_setup_rtc(struct device_node *node)
  142. {
  143. int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
  144. int ret;
  145. if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
  146. return -ENXIO;
  147. /* Local to CPU hence not usable in SMP */
  148. if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
  149. return -EINVAL;
  150. ret = arc_get_timer_clk(node);
  151. if (ret)
  152. return ret;
  153. write_aux_reg(AUX_RTC_CTRL, 1);
  154. return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
  155. }
  156. CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
  157. #endif
  158. /*
  159. * 32bit TIMER1 to keep counting monotonically and wraparound
  160. */
  161. static cycle_t arc_read_timer1(struct clocksource *cs)
  162. {
  163. return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
  164. }
/* 32-bit TIMER1 fallback clocksource: lowest rating (300) of the three */
static struct clocksource arc_counter_timer1 = {
	.name   = "ARC Timer1",
	.rating = 300,
	.read   = arc_read_timer1,
	.mask   = CLOCKSOURCE_MASK(32),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
  172. static int __init arc_cs_setup_timer1(struct device_node *node)
  173. {
  174. int ret;
  175. /* Local to CPU hence not usable in SMP */
  176. if (IS_ENABLED(CONFIG_SMP))
  177. return -EINVAL;
  178. ret = arc_get_timer_clk(node);
  179. if (ret)
  180. return ret;
  181. write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
  182. write_aux_reg(ARC_REG_TIMER1_CNT, 0);
  183. write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
  184. return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
  185. }
/********** Clock Event Device *********/

/* Linux IRQ number for TIMER0, parsed from DT in arc_clockevent_setup() */
static int arc_timer_irq;
  188. /*
  189. * Arm the timer to interrupt after @cycles
  190. * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
  191. */
  192. static void arc_timer_event_setup(unsigned int cycles)
  193. {
  194. write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
  195. write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
  196. write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
  197. }
  198. static int arc_clkevent_set_next_event(unsigned long delta,
  199. struct clock_event_device *dev)
  200. {
  201. arc_timer_event_setup(delta);
  202. return 0;
  203. }
  204. static int arc_clkevent_set_periodic(struct clock_event_device *dev)
  205. {
  206. /*
  207. * At X Hz, 1 sec = 1000ms -> X cycles;
  208. * 10ms -> X / 100 cycles
  209. */
  210. arc_timer_event_setup(arc_timer_freq / HZ);
  211. return 0;
  212. }
/*
 * TIMER0 is per-core, so each CPU gets its own clockevent instance
 * (configured/registered per CPU in arc_timer_starting_cpu()).
 */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};
/* TIMER0 tick interrupt handler (per-cpu IRQ) */
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	/* 1 if periodic (maps onto TIMER_CTRL_IE, bit 0), 0 if oneshot */
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Any write to CTRL reg ACKs the interrupt; we rewrite the
	 * [N]ot [H]alted bit to keep the timer counting, and set the
	 * [I]nterrupt [E]nable bit again only if periodic, re-arming it.
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
  238. static int arc_timer_starting_cpu(unsigned int cpu)
  239. {
  240. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  241. evt->cpumask = cpumask_of(smp_processor_id());
  242. clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
  243. enable_percpu_irq(arc_timer_irq, 0);
  244. return 0;
  245. }
/* CPU hotplug teardown: mask the TIMER0 percpu IRQ on the outgoing CPU */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}
  251. /*
  252. * clockevent setup for boot CPU
  253. */
  254. static int __init arc_clockevent_setup(struct device_node *node)
  255. {
  256. struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
  257. int ret;
  258. arc_timer_irq = irq_of_parse_and_map(node, 0);
  259. if (arc_timer_irq <= 0) {
  260. pr_err("clockevent: missing irq");
  261. return -EINVAL;
  262. }
  263. ret = arc_get_timer_clk(node);
  264. if (ret) {
  265. pr_err("clockevent: missing clk");
  266. return ret;
  267. }
  268. /* Needs apriori irq_set_percpu_devid() done in intc map function */
  269. ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
  270. "Timer0 (per-cpu-tick)", evt);
  271. if (ret) {
  272. pr_err("clockevent: unable to request irq\n");
  273. return ret;
  274. }
  275. ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
  276. "AP_ARC_TIMER_STARTING",
  277. arc_timer_starting_cpu,
  278. arc_timer_dying_cpu);
  279. if (ret) {
  280. pr_err("Failed to setup hotplug state");
  281. return ret;
  282. }
  283. return 0;
  284. }
  285. static int __init arc_of_timer_init(struct device_node *np)
  286. {
  287. static int init_count = 0;
  288. int ret;
  289. if (!init_count) {
  290. init_count = 1;
  291. ret = arc_clockevent_setup(np);
  292. } else {
  293. ret = arc_cs_setup_timer1(np);
  294. }
  295. return ret;
  296. }
  297. CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	/*
	 * Register clks from DT first so the timer init hooks below can
	 * resolve their clk via arc_get_timer_clk().
	 */
	of_clk_init(NULL);

	/* Walk DT and run the CLOCKSOURCE_OF_DECLARE()'d init functions */
	clocksource_probe();
}