idle.h

#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

enum cpu_idle_type {
        CPU_IDLE,
        CPU_NOT_IDLE,
        CPU_NEWLY_IDLE,
        CPU_MAX_IDLE_TYPES
};

extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline int tsk_is_polling(struct task_struct *p)
{
        return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
        __current_set_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired with resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}
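/*
 * Editorial aside, not part of the original header: the other half of the
 * pairing. Conceptually, resched_curr() (kernel/sched/core.c) performs the
 * mirror sequence when it wants a CPU to reschedule:
 *
 *      set_tsk_need_resched(curr);
 *      smp_mb();
 *      if (!tsk_is_polling(curr))
 *              smp_send_reschedule(cpu_of(rq));
 *
 * (current kernels fold the flag set and the polling test into a single
 * atomic op, set_nr_and_not_polling()). With a full barrier on each side,
 * the two CPUs can never both read the other's stale flag: the waker
 * either sees the polling bit and relies on the idle loop to notice
 * TIF_NEED_RESCHED, or sees it clear and sends the resched IPI.
 */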
static inline void __current_clr_polling(void)
{
        clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
        __current_clr_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired with resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}

static inline bool __must_check current_clr_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
        __current_clr_polling();

        /*
         * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
         * Once the bit is cleared, we'll get IPIs with every new
         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
         * fold.
         */
        smp_mb(); /* paired with resched_curr() */

        preempt_fold_need_resched();
}
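/*
 * Editorial aside: for reference, preempt_fold_need_resched() (from
 * <linux/preempt.h>) is roughly
 *
 *      if (tif_need_resched())
 *              set_preempt_need_resched();
 *
 * i.e. a pending TIF_NEED_RESCHED is folded into the preempt count so
 * the next preemption point notices it.
 */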
#endif /* _LINUX_SCHED_IDLE_H */
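
The barrier pairing above is the classic store-buffering (Dekker-style) pattern: each side publishes its own flag with a full barrier before testing the other side's. As a minimal standalone sketch, the userspace program below reproduces the current_clr_polling_and_test() / resched_curr() handshake with C11 atomics and pthreads; the names polling, need_resched, ipi_sent and the two helper functions are invented stand-ins for this illustration, not kernel APIs.

/* Userspace analogue of the polling/resched handshake.
 * Build with: cc -std=c11 -pthread sb_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int polling;       /* stands in for TIF_POLLING_NRFLAG */
static atomic_int need_resched;  /* stands in for TIF_NEED_RESCHED */
static atomic_int ipi_sent;      /* stands in for smp_send_reschedule() */

/* Idle side: clear the polling bit, full fence, then test the flag
 * (mirrors current_clr_polling_and_test()). */
static int clr_polling_and_test(void)
{
        atomic_store_explicit(&polling, 0, memory_order_relaxed);
        /* Plays the role of smp_mb__after_atomic() in the header. */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&need_resched, memory_order_relaxed);
}

/* Waker side: set the flag, full fence, then test the polling bit
 * (mirrors the conceptual resched_curr() sequence). */
static void *waker(void *arg)
{
        (void)arg;
        atomic_store_explicit(&need_resched, 1, memory_order_relaxed);
        /* Plays the role of the full barrier on the resched path. */
        atomic_thread_fence(memory_order_seq_cst);
        if (!atomic_load_explicit(&polling, memory_order_relaxed))
                atomic_store(&ipi_sent, 1);  /* kernel would send an IPI */
        return NULL;
}

int main(void)
{
        pthread_t t;
        int saw;

        atomic_store(&polling, 1);  /* "CPU is in a polling idle loop" */
        pthread_create(&t, NULL, waker, NULL);
        saw = clr_polling_and_test();
        pthread_join(t, NULL);

        /* The two seq_cst fences forbid the store-buffering outcome in
         * which the idle side misses need_resched AND the waker still
         * saw polling == 1; at least one of the two must hold, so the
         * wakeup is never lost. */
        printf("saw need_resched: %d, IPI sent: %d\n",
               saw, atomic_load(&ipi_sent));
        return !(saw || atomic_load(&ipi_sent));
}

Run it in a loop and the failure case (both values zero) never appears; drop the two fences and a weakly ordered machine can lose the wakeup, which is exactly what the smp_mb__after_atomic() in this header prevents.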