/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN       1
#define CGROUP_WEIGHT_DFL       100
#define CGROUP_WEIGHT_MAX       10000
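
/*
 * Illustrative arithmetic (not part of the interface): CGROUP_WEIGHT_DFL
 * is the logarithmic center because DFL / MIN == MAX / DFL == 100, i.e. a
 * weight can be scaled down or up by a factor of up to 100 relative to the
 * default. Shares are proportional to weight, so a sibling configured with
 * weight 200 is entitled to roughly twice the share of one left at the
 * default 100.
 */
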
/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS     (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED  (1U << 1)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
        struct cgroup_subsys    *ss;
        unsigned int            flags;

        struct list_head        *cset_pos;
        struct list_head        *cset_head;

        struct list_head        *tcset_pos;
        struct list_head        *tcset_head;

        struct list_head        *task_pos;
        struct list_head        *tasks_head;
        struct list_head        *mg_tasks_head;

        struct css_set          *cur_cset;
        struct css_set          *cur_dcset;
        struct task_struct      *cur_task;
        struct list_head        iters_node;     /* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)                                                      \
        extern struct static_key_true _x ## _cgrp_subsys_enabled_key;  \
        extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)                                       \
        static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)                                        \
        static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
                                           struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
                                                    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
                                         struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
                                        struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
                         struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
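
/*
 * Minimal usage sketch (illustrative only; my_scan_css() and the pr_info()
 * output are hypothetical): walk every task attached to @css with the
 * css_task_iter API declared above.
 *
 *      static void my_scan_css(struct cgroup_subsys_state *css)
 *      {
 *              struct css_task_iter it;
 *              struct task_struct *task;
 *
 *              css_task_iter_start(css, 0, &it);
 *              while ((task = css_task_iter_next(&it)))
 *                      pr_info("pid %d\n", task_pid_nr(task));
 *              css_task_iter_end(&it);
 *      }
 */
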
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)                                 \
        for ((pos) = css_next_child(NULL, (parent)); (pos);            \
             (pos) = css_next_child((pos), (parent)))
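
/*
 * Minimal usage sketch (illustrative; my_count_children() is a hypothetical
 * helper): count the direct children of @parent. The iteration itself must
 * run under rcu_read_lock() as documented above.
 *
 *      static int my_count_children(struct cgroup_subsys_state *parent)
 *      {
 *              struct cgroup_subsys_state *pos;
 *              int n = 0;
 *
 *              rcu_read_lock();
 *              css_for_each_child(pos, parent)
 *                      n++;
 *              rcu_read_unlock();
 *              return n;
 *      }
 */
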
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *      Lock @css's parent and @css;
 *      Inherit state from the parent;
 *      Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *      css_for_each_descendant_pre(@pos, @css) {
 *              Lock @pos;
 *              if (@pos == @css)
 *                      Update @css's state;
 *              else
 *                      Verify @pos is alive and inherit state from its parent;
 *              Unlock @pos;
 *      }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)                           \
        for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);      \
             (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)                          \
        for ((pos) = css_next_descendant_post(NULL, (css)); (pos);     \
             (pos) = css_next_descendant_post((pos), (css)))
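
/*
 * Minimal usage sketch (illustrative; my_reclaim() is a hypothetical
 * per-css teardown helper): post-order visits children before their
 * parent, which suits bottom-up cleanup of per-css state.
 *
 *      rcu_read_lock();
 *      css_for_each_descendant_post(pos, root)
 *              my_reclaim(pos);
 *      rcu_read_unlock();
 */
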
/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)                    \
        for ((task) = cgroup_taskset_first((tset), &(dst_css));        \
             (task);                                                    \
             (task) = cgroup_taskset_next((tset), &(dst_css)))
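
/*
 * Minimal usage sketch (illustrative; my_attach() is a hypothetical
 * cgroup_subsys ->attach() implementation): visit every migrating task
 * together with its destination css.
 *
 *      static void my_attach(struct cgroup_taskset *tset)
 *      {
 *              struct task_struct *task;
 *              struct cgroup_subsys_state *dst_css;
 *
 *              cgroup_taskset_for_each(task, dst_css, tset)
 *                      pr_info("migrating pid %d\n", task_pid_nr(task));
 *      }
 */
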
/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)           \
        for ((leader) = cgroup_taskset_first((tset), &(dst_css));      \
             (leader);                                                  \
             (leader) = cgroup_taskset_next((tset), &(dst_css)))       \
                if ((leader) != (leader)->group_leader)                \
                        ;                                               \
                else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget(&css->refcnt);
        return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget_live(&css->refcnt);
        return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
        css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
        return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
        css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)                                   \
        rcu_dereference_check((task)->cgroups,                          \
                lockdep_is_held(&cgroup_mutex) ||                       \
                lockdep_is_held(&css_set_lock) ||                       \
                ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)                                   \
        rcu_dereference((task)->cgroups)
#endif
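
/*
 * Illustrative sketch (my_lock is a hypothetical per-subsystem lock): a
 * subsystem that pins the css_set with its own lock, e.g. across
 * ->attach(), can pass that lock as the extra condition:
 *
 *      struct css_set *cset;
 *
 *      cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 */
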
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)                            \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
                if (likely(css_tryget_online(css)))
                        break;
                cpu_relax();
        }
        rcu_read_unlock();
        return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
        return task_css_check(task, subsys_id, true) ==
                init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
        return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *parent_css = cgrp->self.parent;

        if (parent_css)
                return container_of(parent_css, struct cgroup, self);
        return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
                                        struct cgroup *ancestor)
{
        if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
                return false;
        return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
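
/*
 * Minimal usage sketch (illustrative; @parent is a hypothetical cgroup
 * looked up earlier): test whether @task's default-hierarchy cgroup lives
 * at or below @parent.
 *
 *      rcu_read_lock();
 *      if (cgroup_is_descendant(task_dfl_cgroup(task), parent))
 *              ...;
 *      rcu_read_unlock();
 */
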
/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
                                             int ancestor_level)
{
        if (cgrp->level < ancestor_level)
                return NULL;
        while (cgrp && cgrp->level > ancestor_level)
                cgrp = cgroup_parent(cgrp);
        return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        struct css_set *cset = task_css_set(task);

        return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
        return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
                cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
        return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
        return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
        return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
        return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_path(cgrp->kn, buf, buflen);
}
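
/*
 * Minimal usage sketch (illustrative; the buffer size and the pr_info()
 * message are arbitrary choices): format a cgroup's path for a diagnostic
 * message.
 *
 *      char buf[128];
 *
 *      if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *              pr_info("cgroup %s\n", buf);
 */
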
static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
        pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
        pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
        return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
        /*
         * kthreadd is inherited by all kthreads, keep it in the root so
         * that the new kthreads are guaranteed to stay in the root until
         * initialization is finished.
         */
        current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
        /*
         * This kthread finished initialization. The creator should have
         * set PF_NO_SETAFFINITY if this kthread should stay in the root.
         */
        current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
                                char *buf, size_t buflen);

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
        return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
                                              char *buf, size_t buflen) {}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS

/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
                                         u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_charge(task, delta_exec);

        rcu_read_lock();
        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime(cgrp, delta_exec);
        rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_account_field(task, index, delta_exec);

        rcu_read_lock();
        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime_field(cgrp, index, delta_exec);
        rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
        unsigned long v;

        /*
         * @skcd->val is 64bit but the following is safe on 32bit too as we
         * just need the lower ulong to be written and read atomically.
         */
        v = READ_ONCE(skcd->val);

        if (v & 1)
                return &cgrp_dfl_root.cgrp;

        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
        return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
        refcount_t              count;
        struct ns_common        ns;
        struct user_namespace   *user_ns;
        struct ucounts          *ucounts;
        struct css_set          *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
                   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
               struct cgroup_namespace *old_ns)
{
        return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns)
                refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns && refcount_dec_and_test(&ns->count))
                free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */