/*
 * include/linux/hrtimer.h
 *
 * hrtimers - High-resolution kernel timers
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/rbtree.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>

struct hrtimer_clock_base;
struct hrtimer_cpu_base;

/*
 * Mode arguments of xxx_hrtimer functions:
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS = 0x0,		/* Time value is absolute */
	HRTIMER_MODE_REL = 0x1,		/* Time value is relative to now */
	HRTIMER_MODE_PINNED = 0x02,	/* Timer is bound to CPU */
	HRTIMER_MODE_ABS_PINNED = 0x02,
	HRTIMER_MODE_REL_PINNED = 0x03,
};

/*
 * Return values for the callback function
 */
enum hrtimer_restart {
	HRTIMER_NORESTART,	/* Timer is not restarted */
	HRTIMER_RESTART,	/* Timer must be restarted */
};

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it would
 * mean touching the timer after the callback; this makes it impossible to free
 * the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *	timer->base->cpu_base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01

/**
 * struct hrtimer - the basic hrtimer structure
 * @node:	timerqueue node, which also manages node.expires,
 *		the absolute expiry time in the hrtimers internal
 *		representation. The time is related to the clock on
 *		which the timer is based. Is set up by adding
 *		slack to the _softexpires value. For non range timers
 *		identical to _softexpires.
 * @_softexpires: the absolute earliest expiry time of the hrtimer.
 *		The time which was given as expiry time when the timer
 *		was armed.
 * @function:	timer expiry callback function
 * @base:	pointer to the timer base (per cpu and per clock)
 * @state:	state information (See bit values above)
 * @is_rel:	Set if the timer was armed relative
 *
 * The hrtimer structure must be initialized by hrtimer_init()
 */
struct hrtimer {
	struct timerqueue_node		node;
	ktime_t				_softexpires;
	enum hrtimer_restart		(*function)(struct hrtimer *);
	struct hrtimer_clock_base	*base;
	u8				state;
	u8				is_rel;
};
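
/*
 * Illustrative usage sketch (not part of this header): a driver embeds a
 * struct hrtimer, initializes it with hrtimer_init(), points @function at
 * its callback and arms it with hrtimer_start(). The names my_dev and
 * my_timer_fn and the 100 ms delay are made up for illustration. The
 * callback runs in interrupt context and must not sleep.
 *
 *	struct my_dev {
 *		struct hrtimer timer;
 *	};
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, timer);
 *
 *		// ... handle expiry for dev ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	dev->timer.function = my_timer_fn;
 *	hrtimer_start(&dev->timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 */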

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;
	struct task_struct *task;
};
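
/*
 * Rough usage sketch (illustration only, loosely modelled on do_nanosleep();
 * "expires" is assumed to be an absolute CLOCK_MONOTONIC time):
 * hrtimer_init_sleeper(), declared further down, points @task at the
 * sleeping task and installs a wakeup callback that clears it on expiry.
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_set_expires(&t.timer, expires);
 *	hrtimer_init_sleeper(&t, current);
 *
 *	do {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
 *		if (t.task)
 *			schedule();
 *		hrtimer_cancel(&t.timer);
 *	} while (t.task && !signal_pending(current));
 *
 *	__set_current_state(TASK_RUNNING);
 *	destroy_hrtimer_on_stack(&t.timer);
 */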

#ifdef CONFIG_64BIT
# define HRTIMER_CLOCK_BASE_ALIGN	64
#else
# define HRTIMER_CLOCK_BASE_ALIGN	32
#endif

/**
 * struct hrtimer_clock_base - the timer base for a specific clock
 * @cpu_base:	per cpu clock base
 * @index:	clock type index for per_cpu support when moving a
 *		timer to a base on another cpu.
 * @clockid:	clock id for per_cpu support
 * @active:	red black tree root node for the active timers
 * @get_time:	function to retrieve the current time of the clock
 * @offset:	offset of this clock to the monotonic base
 */
struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;
	int			index;
	clockid_t		clockid;
	struct timerqueue_head	active;
	ktime_t			(*get_time)(void);
	ktime_t			offset;
} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));

enum hrtimer_base_type {
	HRTIMER_BASE_MONOTONIC,
	HRTIMER_BASE_REALTIME,
	HRTIMER_BASE_BOOTTIME,
	HRTIMER_BASE_TAI,
	HRTIMER_MAX_CLOCK_BASES,
};

/*
 * struct hrtimer_cpu_base - the per cpu clock bases
 * @lock:		lock protecting the base and associated clock bases
 *			and timers
 * @seq:		seqcount around __run_hrtimer
 * @running:		pointer to the currently running hrtimer
 * @cpu:		cpu number
 * @active_bases:	Bitfield to mark bases with active timers
 * @clock_was_set_seq:	Sequence counter of clock was set events
 * @migration_enabled:	The migration of hrtimers to other cpus is enabled
 * @nohz_active:	The nohz functionality is enabled
 * @expires_next:	absolute time of the next event which was scheduled
 *			via clock_set_next_event()
 * @next_timer:		Pointer to the first expiring timer
 * @in_hrtirq:		hrtimer_interrupt() is currently executing
 * @hres_active:	State of high resolution mode
 * @hang_detected:	The last hrtimer interrupt detected a hang
 * @nr_events:		Total number of hrtimer interrupt events
 * @nr_retries:		Total number of hrtimer interrupt retries
 * @nr_hangs:		Total number of hrtimer interrupt hangs
 * @max_hang_time:	Maximum time spent in hrtimer_interrupt
 * @clock_base:		array of clock bases for this cpu
 *
 * Note: next_timer is just an optimization for __remove_hrtimer().
 *	 Do not dereference the pointer because it is not reliable on
 *	 cross cpu removals.
 */
struct hrtimer_cpu_base {
	raw_spinlock_t			lock;
	seqcount_t			seq;
	struct hrtimer			*running;
	unsigned int			cpu;
	unsigned int			active_bases;
	unsigned int			clock_was_set_seq;
	bool				migration_enabled;
	bool				nohz_active;
#ifdef CONFIG_HIGH_RES_TIMERS
	unsigned int			in_hrtirq	: 1,
					hres_active	: 1,
					hang_detected	: 1;
	ktime_t				expires_next;
	struct hrtimer			*next_timer;
	unsigned int			nr_events;
	unsigned int			nr_retries;
	unsigned int			nr_hangs;
	unsigned int			max_hang_time;
#endif
	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);

	timer->node.expires = time;
	timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
	timer->node.expires = tv64;
	timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}
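
/*
 * Illustration (not part of the API): a range ("slack") timer keeps two
 * expiry values; _softexpires is the earliest time the timer may fire and
 * node.expires the latest. The 10 ms slack below is made up.
 *
 *	hrtimer_set_expires_range_ns(timer, t, 10 * NSEC_PER_MSEC);
 *	// hrtimer_get_softexpires(timer) == t
 *	// hrtimer_get_expires(timer)     == t + 10 ms
 */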

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, timer->base->get_time());
}

static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	return timer->base->get_time();
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return timer->base->cpu_base->hres_active;
}

extern void hrtimer_peek_ahead_timers(void);

/*
 * The resolution of the clocks. The resolution value is returned in
 * the clock_getres() system call to give application programmers an
 * idea of the (in)accuracy of timers. Timer values are rounded up to
 * this resolution value.
 */
# define HIGH_RES_NSEC		1
# define KTIME_HIGH_RES		(HIGH_RES_NSEC)
# define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_HIGH_RES

extern void clock_was_set_delayed(void);

extern unsigned int hrtimer_resolution;

#else

# define MONOTONIC_RES_NSEC	LOW_RES_NSEC
# define KTIME_MONOTONIC_RES	KTIME_LOW_RES
# define hrtimer_resolution	(unsigned int)LOW_RES_NSEC

static inline void hrtimer_peek_ahead_timers(void) { }

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return 0;
}

static inline void clock_was_set_delayed(void) { }

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
	ktime_t rem = ktime_sub(timer->node.expires, now);

	/*
	 * Adjust relative timers for the extra we added in
	 * hrtimer_start_range_ns() to prevent short timeouts.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
		rem -= hrtimer_resolution;
	return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
	return __hrtimer_expires_remaining_adjusted(timer,
						    timer->base->get_time());
}

extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
#else
static inline void timerfd_clock_was_set(void) { }
#endif
extern void hrtimers_resume(void);

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
			 enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
				  enum hrtimer_mode mode);
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void hrtimer_init_on_stack(struct hrtimer *timer,
					 clockid_t which_clock,
					 enum hrtimer_mode mode)
{
	hrtimer_init(timer, which_clock, mode);
}
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
				 const enum hrtimer_mode mode)
{
	hrtimer_start_range_ns(timer, tim, 0, mode);
}

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
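
/*
 * Illustration (names and the 5 ms delay are made up): arm a one-shot timer
 * relative to now, and make sure it is gone before the enclosing object is
 * freed. hrtimer_cancel() waits for a running callback to finish, so it must
 * not be called from the timer's own callback; hrtimer_try_to_cancel()
 * returns -1 instead of waiting in that case.
 *
 *	hrtimer_start(&dev->timer, ms_to_ktime(5), HRTIMER_MODE_REL);
 *	// ... later, on teardown:
 *	hrtimer_cancel(&dev->timer);
 */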

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	u64 delta;
	ktime_t soft, hard;

	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}

static inline void hrtimer_restart(struct hrtimer *timer)
{
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);

extern bool hrtimer_active(const struct hrtimer *timer);

/*
 * Helper function to check whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->base->cpu_base->running == timer;
}

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire after the current time
 * of the hrtimer clock base. Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	return hrtimer_forward(timer, timer->base->get_time(), interval);
}
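
/*
 * Illustration (the function name and 10 ms period are made up): a periodic
 * timer pushes its own expiry forward from within the callback and asks the
 * hrtimer core to requeue it by returning HRTIMER_RESTART.
 *
 *	static enum hrtimer_restart my_periodic_fn(struct hrtimer *t)
 *	{
 *		// ... do the periodic work ...
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */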

/* Precise sleep: */
extern long hrtimer_nanosleep(struct timespec *rqtp,
			      struct timespec __user *rmtp,
			      const enum hrtimer_mode mode,
			      const clockid_t clockid);
extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);

extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				 struct task_struct *tsk);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
					  u64 delta,
					  const enum hrtimer_mode mode,
					  int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
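
/*
 * Illustration (the 2 ms timeout is made up): the caller sets its task state
 * first, then schedule_hrtimeout() sleeps until the timeout expires (returns
 * 0) or until an earlier wakeup or signal (returns -EINTR).
 *
 *	ktime_t to = ms_to_ktime(2);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	int ret = schedule_hrtimeout(&to, HRTIMER_MODE_REL);
 */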

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_dead_cpu(unsigned int cpu);
#else
#define hrtimers_dead_cpu NULL
#endif

#endif