clock.h 2.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104
  1. #ifndef _LINUX_SCHED_CLOCK_H
  2. #define _LINUX_SCHED_CLOCK_H
  3. #include <linux/sched.h>
  4. /*
  5. * Do not use outside of architecture code which knows its limitations.
  6. *
  7. * sched_clock() has no promise of monotonicity or bounded drift between
  8. * CPUs, use (which you should not) requires disabling IRQs.
  9. *
  10. * Please use one of the three interfaces below.
  11. */
  12. extern unsigned long long notrace sched_clock(void);
  13. /*
  14. * See the comment in kernel/sched/clock.c
  15. */
  16. extern u64 running_clock(void);
  17. extern u64 sched_clock_cpu(int cpu);
  18. extern void sched_clock_init(void);
  19. #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  20. static inline void sched_clock_init_late(void)
  21. {
  22. }
  23. static inline void sched_clock_tick(void)
  24. {
  25. }
  26. static inline void clear_sched_clock_stable(void)
  27. {
  28. }
  29. static inline void sched_clock_idle_sleep_event(void)
  30. {
  31. }
  32. static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
  33. {
  34. }
  35. static inline u64 cpu_clock(int cpu)
  36. {
  37. return sched_clock();
  38. }
  39. static inline u64 local_clock(void)
  40. {
  41. return sched_clock();
  42. }
  43. #else
  44. extern void sched_clock_init_late(void);
  45. /*
  46. * Architectures can set this to 1 if they have specified
  47. * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
  48. * but then during bootup it turns out that sched_clock()
  49. * is reliable after all:
  50. */
  51. extern int sched_clock_stable(void);
  52. extern void clear_sched_clock_stable(void);
  53. extern void sched_clock_tick(void);
  54. extern void sched_clock_idle_sleep_event(void);
  55. extern void sched_clock_idle_wakeup_event(u64 delta_ns);
  56. /*
  57. * As outlined in clock.c, provides a fast, high resolution, nanosecond
  58. * time source that is monotonic per cpu argument and has bounded drift
  59. * between cpus.
  60. *
  61. * ######################### BIG FAT WARNING ##########################
  62. * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
  63. * # go backwards !! #
  64. * ####################################################################
  65. */
  66. static inline u64 cpu_clock(int cpu)
  67. {
  68. return sched_clock_cpu(cpu);
  69. }
  70. static inline u64 local_clock(void)
  71. {
  72. return sched_clock_cpu(raw_smp_processor_id());
  73. }
  74. #endif
  75. #ifdef CONFIG_IRQ_TIME_ACCOUNTING
  76. /*
  77. * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
  78. * The reason for this explicit opt-in is not to have perf penalty with
  79. * slow sched_clocks.
  80. */
  81. extern void enable_sched_clock_irqtime(void);
  82. extern void disable_sched_clock_irqtime(void);
  83. #else
  84. static inline void enable_sched_clock_irqtime(void) {}
  85. static inline void disable_sched_clock_irqtime(void) {}
  86. #endif
  87. #endif /* _LINUX_SCHED_CLOCK_H */