/*
 * Process number limiting controller for cgroups.
 *
 * Used to allow a cgroup hierarchy to stop any new processes from fork()ing
 * after a certain limit is reached.
 *
 * Since it is trivial to hit the task limit without hitting any kmemcg limits
 * in place, PIDs are a fundamental resource. As such, PID exhaustion must be
 * preventable in the scope of a cgroup hierarchy by allowing resource limiting
 * of the number of tasks in a cgroup.
 *
 * In order to use the `pids` controller, set the maximum number of tasks in
 * pids.max (this is not available in the root cgroup for obvious reasons). The
 * number of processes currently in the cgroup is given by pids.current.
 * Organisational operations are not blocked by cgroup policies, so it is
 * possible to have pids.current > pids.max. However, it is not possible to
 * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
 * would cause a cgroup policy to be violated.
 *
 * To set a cgroup to have no limit, set pids.max to "max". This is the default
 * for all new cgroups (N.B. that PID limits are hierarchical, so the most
 * stringent limit in the hierarchy is followed).
 *
 * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
 * a superset of parent/child/pids.current.
 *
 * Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com>
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License. See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */
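
/*
 * A minimal usage sketch from userspace, assuming the controller is
 * mounted at /sys/fs/cgroup/pids (the mount point and the cgroup name
 * "mygroup" are illustrative only):
 *
 *   mkdir /sys/fs/cgroup/pids/mygroup
 *   echo 10 > /sys/fs/cgroup/pids/mygroup/pids.max
 *   echo $$ > /sys/fs/cgroup/pids/mygroup/cgroup.procs
 *   cat /sys/fs/cgroup/pids/mygroup/pids.current
 *
 * Once pids.current reaches 10, further fork()s in the group fail with
 * -EAGAIN; writing "max" to pids.max removes the limit again.
 */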

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
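
/*
 * %PIDS_MAX is one greater than %PID_MAX_LIMIT, the most pids the kernel
 * can ever hand out, so a limit of %PIDS_MAX can never be exceeded by a
 * charge and therefore stands in for "unlimited".
 */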
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"

struct pids_cgroup {
        struct cgroup_subsys_state css;

        /*
         * Use 64-bit types so that we can safely represent "max" as
         * %PIDS_MAX = (%PID_MAX_LIMIT + 1).
         */
        atomic64_t counter;
        int64_t limit;
};

static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
        return container_of(css, struct pids_cgroup, css);
}

static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
        return css_pids(pids->css.parent);
}

static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
        struct pids_cgroup *pids;

        pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
        if (!pids)
                return ERR_PTR(-ENOMEM);

        pids->limit = PIDS_MAX;
        atomic64_set(&pids->counter, 0);
        return &pids->css;
}

static void pids_css_free(struct cgroup_subsys_state *css)
{
        kfree(css_pids(css));
}

/**
 * pids_cancel - uncharge the local pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to cancel
 *
 * This function will WARN if the pid count goes under 0, because such a case is
 * a bug in the pids controller proper.
 */
static void pids_cancel(struct pids_cgroup *pids, int num)
{
        /*
         * A negative count (or overflow for that matter) is invalid,
         * and indicates a bug in the `pids` controller proper.
         */
        WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}

/**
 * pids_uncharge - hierarchically uncharge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to uncharge
 */
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; p; p = parent_pids(p))
                pids_cancel(p, num);
}

/**
 * pids_charge - hierarchically charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function does *not* follow the pid limit set. It cannot fail and the new
 * pid count may exceed the limit. This is only used for reverting failed
 * attaches, where there is no other way out than violating the limit.
 */
static void pids_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; p; p = parent_pids(p))
                atomic64_add(num, &p->counter);
}

/**
 * pids_try_charge - hierarchically try to charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function follows the set limit. It will fail if the charge would cause
 * the new value to exceed the hierarchical limit. Returns 0 if the charge
 * succeeded, otherwise -EAGAIN.
 */
static int pids_try_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p, *q;

        for (p = pids; p; p = parent_pids(p)) {
                int64_t new = atomic64_add_return(num, &p->counter);

                /*
                 * Since new is capped to the maximum number of pid_t, if
                 * p->limit is %PIDS_MAX then we know that this test will never
                 * fail.
                 */
                if (new > p->limit)
                        goto revert;
        }

        return 0;

revert:
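        /*
         * Undo only what the loop above actually charged: every level from
         * @pids up to (but excluding) @p, plus @p itself, which was charged
         * by atomic64_add_return() before its limit check failed.
         */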
        for (q = pids; q != p; q = parent_pids(q))
                pids_cancel(q, num);
        pids_cancel(p, num);

        return -EAGAIN;
}
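
/*
 * Attaching is an organisational operation (see the header comment), so it
 * deliberately bypasses the limit: the target hierarchy is charged with
 * pids_charge(), which cannot fail. The new charge is applied before the
 * old one is dropped, so ancestors shared by both hierarchies only ever
 * see a transient over-count, never a missing charge.
 */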
static int pids_can_attach(struct cgroup_subsys_state *css,
                           struct cgroup_taskset *tset)
{
        struct pids_cgroup *pids = css_pids(css);
        struct task_struct *task;

        cgroup_taskset_for_each(task, tset) {
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                /*
                 * No need to pin @old_css between here and cancel_attach()
                 * because cgroup core protects it from being freed before
                 * the migration completes or fails.
                 */
                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(pids, 1);
                pids_uncharge(old_pids, 1);
        }

        return 0;
}

static void pids_cancel_attach(struct cgroup_subsys_state *css,
                               struct cgroup_taskset *tset)
{
        struct pids_cgroup *pids = css_pids(css);
        struct task_struct *task;

        cgroup_taskset_for_each(task, tset) {
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(old_pids, 1);
                pids_uncharge(pids, 1);
        }
}

static int pids_can_fork(struct task_struct *task, void **priv_p)
{
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;
        int err;

        /*
         * Use the "current" task_css for the pids subsystem as the tentative
         * css. It is possible we will charge the wrong hierarchy, in which
         * case we will forcefully revert/reapply the charge on the right
         * hierarchy after it is committed to the task proper.
         */
        css = task_get_css(current, pids_cgrp_id);
        pids = css_pids(css);

        err = pids_try_charge(pids, 1);
        if (err)
                goto err_css_put;

        *priv_p = css;
        return 0;

err_css_put:
        css_put(css);
        return err;
}
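
/*
 * Undo the tentative charge taken in pids_can_fork() when the fork is
 * aborted later in the fork path, and drop the css reference pinned there.
 */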
static void pids_cancel_fork(struct task_struct *task, void *priv)
{
        struct cgroup_subsys_state *css = priv;
        struct pids_cgroup *pids = css_pids(css);

        pids_uncharge(pids, 1);
        css_put(css);
}

static void pids_fork(struct task_struct *task, void *priv)
{
        struct cgroup_subsys_state *css;
        struct cgroup_subsys_state *old_css = priv;
        struct pids_cgroup *pids;
        struct pids_cgroup *old_pids = css_pids(old_css);

        css = task_get_css(task, pids_cgrp_id);
        pids = css_pids(css);

        /*
         * If the association has changed, we have to revert and reapply the
         * charge/uncharge on the wrong hierarchy to the current one. Since
         * the association can only change due to an organisational event,
         * it's okay for us to ignore the limit in this case.
         */
        if (pids != old_pids) {
                pids_uncharge(old_pids, 1);
                pids_charge(pids, 1);
        }

        css_put(css);
        css_put(old_css);
}
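
/*
 * A task's charge is dropped when it exits; @old_css is the css the task
 * was associated with, so this pairs with the charge taken at fork time.
 */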
static void pids_exit(struct cgroup_subsys_state *css,
                      struct cgroup_subsys_state *old_css,
                      struct task_struct *task)
{
        struct pids_cgroup *pids = css_pids(old_css);

        pids_uncharge(pids, 1);
}

static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
                              size_t nbytes, loff_t off)
{
        struct cgroup_subsys_state *css = of_css(of);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit;
        int err;

        buf = strstrip(buf);
        if (!strcmp(buf, PIDS_MAX_STR)) {
                limit = PIDS_MAX;
                goto set_limit;
        }

        err = kstrtoll(buf, 0, &limit);
        if (err)
                return err;

        if (limit < 0 || limit >= PIDS_MAX)
                return -EINVAL;

set_limit:
        /*
         * Limit updates don't need to be mutex'd, since it isn't
         * critical that any racing fork()s follow the new limit.
         */
        pids->limit = limit;
        return nbytes;
}

static int pids_max_show(struct seq_file *sf, void *v)
{
        struct cgroup_subsys_state *css = seq_css(sf);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit = pids->limit;

        if (limit >= PIDS_MAX)
                seq_printf(sf, "%s\n", PIDS_MAX_STR);
        else
                seq_printf(sf, "%lld\n", limit);

        return 0;
}

static s64 pids_current_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
{
        struct pids_cgroup *pids = css_pids(css);

        return atomic64_read(&pids->counter);
}
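
/*
 * The same cftypes are registered on both the legacy and the default
 * (unified) hierarchy, so pids.max and pids.current behave identically on
 * either; pids.max is simply hidden on the root cgroup via
 * CFTYPE_NOT_ON_ROOT.
 */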
static struct cftype pids_files[] = {
        {
                .name = "max",
                .write = pids_max_write,
                .seq_show = pids_max_show,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        {
                .name = "current",
                .read_s64 = pids_current_read,
        },
        { } /* terminate */
};

struct cgroup_subsys pids_cgrp_subsys = {
        .css_alloc = pids_css_alloc,
        .css_free = pids_css_free,
        .can_attach = pids_can_attach,
        .cancel_attach = pids_cancel_attach,
        .can_fork = pids_can_fork,
        .cancel_fork = pids_cancel_fork,
        .fork = pids_fork,
        .exit = pids_exit,
        .legacy_cftypes = pids_files,
        .dfl_cftypes = pids_files,
};