/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
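
/*
 * Per-CPU timer interrupt: walk it_value forward by whole clockticks to
 * count how many ticks elapsed, do the tick bookkeeping, then program
 * CR16 with the next safely reachable compare value.
 */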
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long ticks_elapsed = 0;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the old expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Calculate how many ticks have elapsed. */
	do {
		++ticks_elapsed;
		next_tick += cpt;
		now = mfctl(16);
	} while (next_tick - now > cpt);

	/* Store (in CR16 cycles) up to when we are accounting right now. */
	cpuinfo->it_value = next_tick;

	/* Go do system housekeeping. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	update_process_times(user_mode(get_irq_regs()));

	/* Skip clockticks on purpose if we know we would miss those.
	 * The new CR16 must be "later" than the current CR16, otherwise
	 * the itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor.  We'll account for the missed
	 * ticks on the next timer interrupt.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * of whether one or the other wrapped.  If "now" is "bigger" we'll
	 * end up with a very large unsigned number.
	 */
	while (next_tick - mfctl(16) > cpt)
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt.
	 * Only the bottom 32 bits of next_tick are writable in CR16!
	 * The timer interrupt will be delivered at least a few hundred cycles
	 * after the IT fires, so if we are too close (<= 500 cycles) to the
	 * next tick, simply skip it.
	 */
	if (next_tick - mfctl(16) <= 500)
		next_tick += cpt;

	mtctl(next_tick, 16);

	return IRQ_HANDLED;
}
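
/*
 * Return the PC to credit for a profiling hit.  PSW_N is the PA-RISC
 * nullify bit: when set, the instruction at the saved PC will not be
 * executed, so step back one word.  On SMP, hits inside the lock
 * functions are credited to the caller instead; gr[2] is the return
 * pointer (rp) in the PA-RISC calling convention.
 */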
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);

/* clock source code */
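
/*
 * get_cycles() reads the free-running CR16 Interval Timer (mfctl(16)),
 * so this clocksource exposes the raw counter described in the comment
 * at the top of this file.  The mask is sized to BITS_PER_LONG to match
 * the 32- or 64-bit width of the read-only register.
 */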
static u64 notrace read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};

void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}
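
/*
 * Generic RTC support: the firmware (PDC) maintains a time-of-day clock,
 * which the rtc-generic platform device registered below exposes through
 * these read/set callbacks.
 */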
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	struct pdc_tod tod_data;

	memset(tm, 0, sizeof(*tm));
	if (pdc_tod_read(&tod_data) < 0)
		return -EOPNOTSUPP;

	/* we treat tod_sec as unsigned, so this can work until year 2106 */
	rtc_time64_to_tm(tod_data.tod_sec, tm);
	return rtc_valid_tm(tm);
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = rtc_tm_to_time64(tm);

	if (pdc_tod_set(secs, 0) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif
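
/*
 * read_persistent_clock() seeds the system time at boot from the same
 * PDC time-of-day clock; tod_usec supplies sub-second resolution.
 */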
void read_persistent_clock(struct timespec *ts)
{
	static struct pdc_tod tod_data;

	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
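
/*
 * sched_clock() also runs off CR16: the raw cycle counter registered in
 * time_init() below is converted to nanoseconds by the generic
 * sched_clock framework.
 */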
static u64 notrace read_cr16_sched_clock(void)
{
	return get_cycles();
}


/*
 * timer interrupt and sched_clock() initialization
 */
void __init time_init(void)
{
	unsigned long cr16_hz;

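	/*
	 * PAGE0->mem_10msec is the number of CR16 cycles in 10 ms as
	 * reported by firmware, so 100 * mem_10msec is the CR16 frequency
	 * in Hz, and dividing that by HZ gives the cycles per timer tick.
	 */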
	clocktick = (100 * PAGE0->mem_10msec) / HZ;

	start_cpu_itimer();	/* get CPU 0 started */

	cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */

	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}

static int __init init_cr16_clocksource(void)
{
	/*
	 * The cr16 interval timers are not synchronized across CPUs, so
	 * mark them unstable and lower their rating on SMP systems.
	 */
	if (num_online_cpus() > 1) {
		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
		clocksource_cr16.rating = 0;
	}

	/* register at clocksource framework */
	clocksource_register_hz(&clocksource_cr16,
		100 * PAGE0->mem_10msec);

	return 0;
}

device_initcall(init_cr16_clocksource);