/* kernel/sched/cpuacct.c */
  1. #include <linux/cgroup.h>
  2. #include <linux/slab.h>
  3. #include <linux/percpu.h>
  4. #include <linux/spinlock.h>
  5. #include <linux/cpumask.h>
  6. #include <linux/seq_file.h>
  7. #include <linux/rcupdate.h>
  8. #include <linux/kernel_stat.h>
  9. #include <linux/err.h>
  10. #include "sched.h"
/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */
/*
 * Time spent by the tasks of the cpu accounting group executing in ...
 * Indexes into cpuacct_stat_desc[] below; keep the two in sync.
 */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
	CPUACCT_STAT_NSTATS,	/* number of stat indices above */
};
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;	/* embedded cgroup state; css_ca() recovers us */
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	/* per-cpu user/system breakdown, reported via cpuacct.stat */
	struct kernel_cpustat __percpu *cpustat;
};
  30. static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
  31. {
  32. return css ? container_of(css, struct cpuacct, css) : NULL;
  33. }
/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	/* Caller must hold RCU read lock (or otherwise pin the css). */
	return css_ca(task_css(tsk, cpuacct_cgrp_id));
}
/* return the parent accounting group, or NULL for the root group */
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	return css_ca(ca->css.parent);
}
/* Per-cpu usage counter backing the statically allocated root group. */
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);

/*
 * The root accounting group. Its cpustat aliases the global
 * kernel_cpustat, so the root's stats are the system-wide stats and
 * need no separate accounting (see cpuacct_account_field()).
 */
static struct cpuacct root_cpuacct = {
	.cpustat	= &kernel_cpustat,
	.cpuusage	= &root_cpuacct_cpuusage,
};
  48. /* create a new cpu accounting group */
  49. static struct cgroup_subsys_state *
  50. cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
  51. {
  52. struct cpuacct *ca;
  53. if (!parent_css)
  54. return &root_cpuacct.css;
  55. ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  56. if (!ca)
  57. goto out;
  58. ca->cpuusage = alloc_percpu(u64);
  59. if (!ca->cpuusage)
  60. goto out_free_ca;
  61. ca->cpustat = alloc_percpu(struct kernel_cpustat);
  62. if (!ca->cpustat)
  63. goto out_free_cpuusage;
  64. return &ca->css;
  65. out_free_cpuusage:
  66. free_percpu(ca->cpuusage);
  67. out_free_ca:
  68. kfree(ca);
  69. out:
  70. return ERR_PTR(-ENOMEM);
  71. }
/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
	struct cpuacct *ca = css_ca(css);

	/*
	 * Release in reverse allocation order. Never called for the
	 * static root group, so unconditional freeing is safe here.
	 */
	free_percpu(ca->cpustat);
	free_percpu(ca->cpuusage);
	kfree(ca);
}
/*
 * Read @cpu's usage counter of group @ca.
 *
 * On 32-bit platforms a plain u64 load can tear against a concurrent
 * updater, so serialize with the writer under that cpu's rq->lock.
 */
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	/* 64-bit loads are atomic; no locking needed. */
	data = *cpuusage;
#endif

	return data;
}
/*
 * Overwrite @cpu's usage counter of group @ca with @val.
 *
 * Mirrors cpuacct_cpuusage_read(): on 32-bit, the store must be done
 * under rq->lock to avoid a torn write racing with readers/updaters.
 */
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	/* 64-bit stores are atomic; no locking needed. */
	*cpuusage = val;
#endif
}
  110. /* return total cpu usage (in nanoseconds) of a group */
  111. static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
  112. {
  113. struct cpuacct *ca = css_ca(css);
  114. u64 totalcpuusage = 0;
  115. int i;
  116. for_each_present_cpu(i)
  117. totalcpuusage += cpuacct_cpuusage_read(ca, i);
  118. return totalcpuusage;
  119. }
  120. static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
  121. u64 val)
  122. {
  123. struct cpuacct *ca = css_ca(css);
  124. int err = 0;
  125. int i;
  126. /*
  127. * Only allow '0' here to do a reset.
  128. */
  129. if (val) {
  130. err = -EINVAL;
  131. goto out;
  132. }
  133. for_each_present_cpu(i)
  134. cpuacct_cpuusage_write(ca, i, 0);
  135. out:
  136. return err;
  137. }
  138. static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
  139. {
  140. struct cpuacct *ca = css_ca(seq_css(m));
  141. u64 percpu;
  142. int i;
  143. for_each_present_cpu(i) {
  144. percpu = cpuacct_cpuusage_read(ca, i);
  145. seq_printf(m, "%llu ", (unsigned long long) percpu);
  146. }
  147. seq_printf(m, "\n");
  148. return 0;
  149. }
/* Field names emitted by cpuacct.stat, indexed by cpuacct_stat_index. */
static const char * const cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER]	= "user",
	[CPUACCT_STAT_SYSTEM]	= "system",
};
  154. static int cpuacct_stats_show(struct seq_file *sf, void *v)
  155. {
  156. struct cpuacct *ca = css_ca(seq_css(sf));
  157. int cpu;
  158. s64 val = 0;
  159. for_each_online_cpu(cpu) {
  160. struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
  161. val += kcpustat->cpustat[CPUTIME_USER];
  162. val += kcpustat->cpustat[CPUTIME_NICE];
  163. }
  164. val = cputime64_to_clock_t(val);
  165. seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_USER], val);
  166. val = 0;
  167. for_each_online_cpu(cpu) {
  168. struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
  169. val += kcpustat->cpustat[CPUTIME_SYSTEM];
  170. val += kcpustat->cpustat[CPUTIME_IRQ];
  171. val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
  172. }
  173. val = cputime64_to_clock_t(val);
  174. seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
  175. return 0;
  176. }
/* Control files exposed by this subsystem in each cgroup directory. */
static struct cftype files[] = {
	{
		/* total usage in ns; writing 0 resets it */
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		/* per-cpu usage breakdown, read-only */
		.name = "usage_percpu",
		.seq_show = cpuacct_percpu_seq_show,
	},
	{
		/* user/system split in clock ticks, read-only */
		.name = "stat",
		.seq_show = cpuacct_stats_show,
	},
	{ }	/* terminate */
};
  193. /*
  194. * charge this task's execution time to its accounting group.
  195. *
  196. * called with rq->lock held.
  197. */
  198. void cpuacct_charge(struct task_struct *tsk, u64 cputime)
  199. {
  200. struct cpuacct *ca;
  201. rcu_read_lock();
  202. for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
  203. *this_cpu_ptr(ca->cpuusage) += cputime;
  204. rcu_read_unlock();
  205. }
  206. /*
  207. * Add user/system time to cpuacct.
  208. *
  209. * Note: it's the caller that updates the account of the root cgroup.
  210. */
  211. void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
  212. {
  213. struct cpuacct *ca;
  214. rcu_read_lock();
  215. for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
  216. this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
  217. rcu_read_unlock();
  218. }
/*
 * Subsystem registration. early_init is set because the scheduler
 * charges accounting (cpuacct_charge()) before normal initcalls run.
 */
struct cgroup_subsys cpuacct_cgrp_subsys = {
	.css_alloc	= cpuacct_css_alloc,
	.css_free	= cpuacct_css_free,
	.legacy_cftypes	= files,
	.early_init	= true,
};