/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"
  11. static struct cgroup_subsys_state *
  12. debug_css_alloc(struct cgroup_subsys_state *parent_css)
  13. {
  14. struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
  15. if (!css)
  16. return ERR_PTR(-ENOMEM);
  17. return css;
  18. }
/* debug_css_free - release the css allocated by debug_css_alloc(). */
static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}
/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: css of the cgroup in question
 * @cft: the cftype being read (unused)
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}
/*
 * current_css_set_read - dump the current task's css_set.
 *
 * Prints the css_set pointer and its refcount — plus, when the refcount
 * exceeds the member task count, the number of extra references — then
 * one line per css the set points to.
 */
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	/* pin the cgroup; fails with -ENODEV if it is being removed */
	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	/* css_set_lock stabilizes the set; RCU protects current->cgroups */
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	/* references beyond those held by member tasks */
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
			   (unsigned long)css, css->id);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}
/*
 * current_css_set_refcount_read - report the refcount of the current
 * task's css_set.  The RCU read lock keeps the css_set alive while its
 * refcount is sampled.
 */
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}
/*
 * current_css_set_cg_links_read - list, for the current task's css_set,
 * the hierarchy id and cgroup name of every cgroup the set is linked to.
 */
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	/* allocate before taking css_set_lock — GFP_KERNEL may sleep */
	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}
  95. #define MAX_TASKS_SHOWN_PER_CSS 25
  96. static int cgroup_css_links_read(struct seq_file *seq, void *v)
  97. {
  98. struct cgroup_subsys_state *css = seq_css(seq);
  99. struct cgrp_cset_link *link;
  100. int dead_cnt = 0, extra_refs = 0;
  101. spin_lock_irq(&css_set_lock);
  102. list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
  103. struct css_set *cset = link->cset;
  104. struct task_struct *task;
  105. int count = 0;
  106. int refcnt = refcount_read(&cset->refcount);
  107. seq_printf(seq, " %d", refcnt);
  108. if (refcnt - cset->nr_tasks > 0) {
  109. int extra = refcnt - cset->nr_tasks;
  110. seq_printf(seq, " +%d", extra);
  111. /*
  112. * Take out the one additional reference in
  113. * init_css_set.
  114. */
  115. if (cset == &init_css_set)
  116. extra--;
  117. extra_refs += extra;
  118. }
  119. seq_puts(seq, "\n");
  120. list_for_each_entry(task, &cset->tasks, cg_list) {
  121. if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
  122. seq_printf(seq, " task %d\n",
  123. task_pid_vnr(task));
  124. }
  125. list_for_each_entry(task, &cset->mg_tasks, cg_list) {
  126. if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
  127. seq_printf(seq, " task %d\n",
  128. task_pid_vnr(task));
  129. }
  130. /* show # of overflowed tasks */
  131. if (count > MAX_TASKS_SHOWN_PER_CSS)
  132. seq_printf(seq, " ... (%d)\n",
  133. count - MAX_TASKS_SHOWN_PER_CSS);
  134. if (cset->dead) {
  135. seq_puts(seq, " [dead]\n");
  136. dead_cnt++;
  137. }
  138. WARN_ON(count != cset->nr_tasks);
  139. }
  140. spin_unlock_irq(&css_set_lock);
  141. if (!dead_cnt && !extra_refs)
  142. return 0;
  143. seq_puts(seq, "\n");
  144. if (extra_refs)
  145. seq_printf(seq, "extra references = %d\n", extra_refs);
  146. if (dead_cnt)
  147. seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
  148. return 0;
  149. }
  150. static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
  151. {
  152. struct kernfs_open_file *of = seq->private;
  153. struct cgroup *cgrp;
  154. struct cgroup_subsys *ss;
  155. struct cgroup_subsys_state *css;
  156. char pbuf[16];
  157. int i;
  158. cgrp = cgroup_kn_lock_live(of->kn, false);
  159. if (!cgrp)
  160. return -ENODEV;
  161. for_each_subsys(ss, i) {
  162. css = rcu_dereference_check(cgrp->subsys[ss->id], true);
  163. if (!css)
  164. continue;
  165. pbuf[0] = '\0';
  166. /* Show the parent CSS if applicable*/
  167. if (css->parent)
  168. snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
  169. css->parent->id);
  170. seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
  171. (unsigned long)css, css->id,
  172. atomic_read(&css->online_cnt), pbuf);
  173. }
  174. cgroup_kn_unlock(of->kn);
  175. return 0;
  176. }
  177. static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
  178. u16 mask)
  179. {
  180. struct cgroup_subsys *ss;
  181. int ssid;
  182. bool first = true;
  183. seq_printf(seq, "%-17s: ", name);
  184. for_each_subsys(ss, ssid) {
  185. if (!(mask & (1 << ssid)))
  186. continue;
  187. if (!first)
  188. seq_puts(seq, ", ");
  189. seq_puts(seq, ss->name);
  190. first = false;
  191. }
  192. seq_putc(seq, '\n');
  193. }
  194. static int cgroup_masks_read(struct seq_file *seq, void *v)
  195. {
  196. struct kernfs_open_file *of = seq->private;
  197. struct cgroup *cgrp;
  198. cgrp = cgroup_kn_lock_live(of->kn, false);
  199. if (!cgrp)
  200. return -ENODEV;
  201. cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
  202. cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);
  203. cgroup_kn_unlock(of->kn);
  204. return 0;
  205. }
  206. static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
  207. {
  208. return (!cgroup_is_populated(css->cgroup) &&
  209. !css_has_online_children(&css->cgroup->self));
  210. }
/*
 * Interface files for v1 (legacy) hierarchies; registered via
 * .legacy_cftypes below.  The *_ONLY_ON_ROOT files report on the
 * reading task's css_set and so only appear in the root cgroup.
 */
static struct cftype debug_legacy_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},
	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},
	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},
	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},
	{ }	/* terminate */
};
/*
 * Interface files for the v2 (default) hierarchy; installed as
 * .dfl_cftypes by enable_cgroup_debug().  Same handlers as the legacy
 * set, but with shorter knob names and without "releasable".
 */
static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},
	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},
	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},
	{ }	/* terminate */
};
/* Controller callbacks and v1 interface files for the debug controller. */
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_legacy_files,
};
/*
 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
 * parameter.
 */
static int __init enable_cgroup_debug(char *str)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	return 1;	/* parameter consumed */
}
__setup("cgroup_debug", enable_cgroup_debug);