/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu *shrinker_map;
#endif
	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	bool congested;			/* memcg has many dirty pages */
					/* backed by a congested BDI */

	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of	    */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t stat[MEMCG_NR_STAT];
	atomic_long_t events[NR_VM_EVENT_ITEMS];
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, struct mem_cgroup **memcgp,
				bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller is
 * disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

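/*
 * Illustrative sketch (not a call site from this file): reclaim code
 * typically resolves the lruvec for a memcg on a given node before
 * walking its LRU lists, roughly:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long nr = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
 */
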
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}

struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
			       struct task_struct *p);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

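/*
 * Illustrative pairing (a sketch; the real call sites live in the
 * architecture page fault handlers, not in this file):
 *
 *	mem_cgroup_enter_user_fault();
 *	fault = handle_mm_fault(vma, address, flags);
 *	mem_cgroup_exit_user_fault();
 *
 * A memcg OOM hit between the two calls is recorded on the task and
 * resolved afterwards via mem_cgroup_oom_synchronize() below.
 */
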
static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

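/*
 * Illustrative sketch (assumes a memcg pointer is at hand and that
 * interrupts are already disabled, as the __-prefixed variant requires):
 *
 *	__mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *
 * Small deltas stay in the per-CPU counter and are folded into the
 * atomic counter only once they exceed MEMCG_CHARGE_BATCH, keeping
 * atomic operations off the common path.
 */
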
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

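/*
 * Illustrative sketch (the real call sites are in mm/memcontrol.c): a
 * charge path that just failed against the hard limit would record
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * and any userspace poller of the cgroup's memory.events file is then
 * woken through the cgroup_file_notify() above.
 */
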
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

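/*
 * Illustrative sketch (a hedged example, not a call site from this
 * file): networking code consults this before letting a socket's
 * memory footprint grow, roughly:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		goto suppress_allocation;
 *
 * Note the walk up the hierarchy above: pressure on any ancestor
 * throttles the whole subtree until its socket_pressure stamp expires.
 */
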
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

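/*
 * Illustrative sketch (a hedged example; "lookup_cache_for_idx" stands
 * in for whatever per-index lookup the caller uses, and slab_mutex must
 * be held as noted above):
 *
 *	int i;
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = lookup_cache_for_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		...
 *	}
 */
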
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */