/* include/linux/context_tracking.h */

#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/vtime.h>
#include <asm/ptrace.h>

struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum ctx_state {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};
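
/*
 * Descriptive note: each CPU carries one instance of this state machine.
 * user_enter() moves ->state from IN_KERNEL to IN_USER and user_exit()
 * moves it back, but only on CPUs whose ->active flag has been set via
 * context_tracking_cpu_set() below; on inactive CPUs both calls are no-ops.
 */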
#ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking);

static inline bool context_tracking_in_user(void)
{
	return __this_cpu_read(context_tracking.state) == IN_USER;
}

static inline bool context_tracking_active(void)
{
	return __this_cpu_read(context_tracking.active);
}
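
/*
 * Descriptive note: the probes below are wired up by architecture entry
 * code. user_exit() is typically called when entering the kernel from
 * userspace and user_enter() when returning to it, while
 * context_tracking_cpu_set() marks a CPU as actively tracked (used by
 * the full dynticks machinery).
 */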
extern void context_tracking_cpu_set(int cpu);

extern void user_enter(void);
extern void user_exit(void);

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	prev_ctx = this_cpu_read(context_tracking.state);
	user_exit();

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (prev_ctx == IN_USER)
		user_enter();
}
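
/*
 * Illustrative sketch (the handler name and argument are hypothetical,
 * not taken from this header): an architecture exception handler is
 * expected to bracket its work with this pair so the fault is accounted
 * as kernel time and the saved context is restored on the way out:
 *
 *	void example_fault_handler(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		... handle the exception in kernel context ...
 *
 *		exception_exit(prev_state);
 *	}
 */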

extern void context_tracking_task_switch(struct task_struct *prev,
					 struct task_struct *next);

#else
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
						struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void guest_enter(void);
extern void guest_exit(void);
#else
static inline void guest_enter(void)
{
	/*
	 * This is running in ioctl context so it's safe
	 * to assume that it's the stime pending cputime
	 * to flush.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
}

static inline void guest_exit(void)
{
	/* Flush the guest cputime we spent on the guest */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
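
/*
 * Illustrative sketch (simplified; the surrounding loop is hypothetical):
 * a hypervisor such as KVM wraps actual guest execution with this pair
 * so that CPU time spent inside the guest is accounted separately,
 * roughly:
 *
 *	guest_enter();
 *	... run the vcpu until it exits ...
 *	guest_exit();
 */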

#endif /* _LINUX_CONTEXT_TRACKING_H */