preempt.h

#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED        (0)
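
/*
 * Accessors for the current task's preemption counter, which this generic
 * header keeps in thread_info.
 */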
static __always_inline int preempt_count(void)
{
        return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
        return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
        *preempt_count_ptr() = pc;
}
/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
        (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
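
/*
 * The generic implementation does not fold PREEMPT_NEED_RESCHED into the
 * preemption counter, so the helpers below are no-ops / always false and
 * the TIF_NEED_RESCHED flag is tested directly instead.
 */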
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
        return false;
}
/*
 * The various preempt_count add/sub methods
 */
static __always_inline void __preempt_count_add(int val)
{
        *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
        *preempt_count_ptr() -= val;
}
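
/*
 * Illustrative sketch (not part of this header): the preempt_disable()
 * and preempt_enable() helpers in <linux/preempt.h> are built on these
 * primitives, roughly along the following lines for a CONFIG_PREEMPT
 * kernel; the exact definitions add debug and tracing variants.
 *
 *	#define preempt_disable() \
 *	do { \
 *		preempt_count_inc(); \
 *		barrier(); \
 *	} while (0)
 *
 *	#define preempt_enable() \
 *	do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 */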
static __always_inline bool __preempt_count_dec_and_test(void)
{
        /*
         * Because load-store architectures cannot do per-cpu atomic
         * operations, we cannot use PREEMPT_NEED_RESCHED here: the folded
         * bit might get lost.
         */
        return !--*preempt_count_ptr() && tif_need_resched();
}
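
/*
 * Architectures that can fold PREEMPT_NEED_RESCHED into a per-cpu
 * preempt count (x86, for instance) override this with a single
 * decrement-and-test; the generic version has to check the
 * TIF_NEED_RESCHED flag separately.
 */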
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
        return unlikely(!preempt_count() && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
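
/*
 * With CONFIG_CONTEXT_TRACKING, a context-tracking aware flavour of
 * preempt_schedule() is provided as well, so that the tracking state can
 * be kept consistent across a preemption-triggered reschedule.
 */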
#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */