stats.h

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)

#else /* !CONFIG_SCHEDSTATS */

static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}

# define schedstat_enabled()		0
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)

#endif
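
/*
 * Illustrative sketch (not part of the original header): roughly how
 * scheduler code is expected to use the schedstat_*() helpers above.  With
 * CONFIG_SCHEDSTATS enabled the update only happens when the
 * sched_schedstats static branch is on; otherwise the macros compile away
 * entirely.  The helper name below is hypothetical; yld_count is one of the
 * per-rq schedstat counters.
 */
static inline void example_note_yield(struct rq *rq)
{
	/* Caller is assumed to hold the runqueue lock. */
	schedstat_inc(rq, yld_count);
}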

#ifdef CONFIG_SCHED_INFO

static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu; we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
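
/*
 * Worked example for the arithmetic above (illustrative, made-up numbers):
 * a task whose last_queued stamp was taken at rq_clock() == 100000 ns and
 * which first gets the cpu at 350000 ns accumulates 250000 ns of run_delay
 * in sched_info_arrive(); last_queued is then cleared so a later requeue
 * starts a fresh wait interval.
 */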

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}

#else
#define sched_info_queued(rq, t)		do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(rq, t)		do { } while (0)
#define sched_info_depart(rq, t)		do { } while (0)
#define sched_info_arrive(rq, next)		do { } while (0)
#define sched_info_switch(rq, t, next)		do { } while (0)
#endif /* CONFIG_SCHED_INFO */
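
/*
 * Illustrative sketch (not part of the original header): the context-switch
 * path is expected to invoke sched_info_switch() once per switch, with the
 * runqueue lock held and prev != next.  The helper name below is
 * hypothetical; the sched_info_on() check inside keeps the bookkeeping
 * cheap when delay accounting and schedstats are both disabled.
 */
static inline void example_switch_stats(struct rq *rq,
					struct task_struct *prev,
					struct task_struct *next)
{
	/* Records prev's departure and next's wait-to-run delay. */
	sched_info_switch(rq, prev, next);
}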

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk:	Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
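
/*
 * Illustrative sketch (not part of the original header): the per-task tick
 * and runtime-update paths are expected to feed the account_group_*()
 * helpers above, e.g. charging a tick's worth of user time plus an
 * exec-runtime delta to the caller's thread group.  The helper name and
 * parameters below are hypothetical.
 */
static inline void example_charge_group(struct task_struct *p,
					cputime_t user_time,
					unsigned long long delta_exec_ns)
{
	/* Both updates are skipped when no thread-group timer is armed. */
	account_group_user_time(p, user_time);
	account_group_exec_runtime(p, delta_exec_ns);
}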