/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
        MEMCG_RSS,
        MEMCG_RSS_HUGE,
        MEMCG_SWAP,
        MEMCG_SOCK,
        /* XXX: why are these zone and not node counters? */
        MEMCG_KERNEL_STACK_KB,
        MEMCG_NR_STAT,
};
enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
        MEMCG_PROT_NONE,
        MEMCG_PROT_LOW,
        MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        int priority;
        unsigned int generation;
};
#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       USHRT_MAX

struct mem_cgroup_id {
        int id;
        atomic_t ref;
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_TARGET_NUMAINFO,
        MEM_CGROUP_NTARGETS,
};
struct mem_cgroup_stat_cpu {
        long count[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

struct lruvec_stat {
        long count[NR_VM_NODE_STAT_ITEMS];
};
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;
        struct lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which
                                         * the soft limit is exceeded */
        bool on_tree;
        bool congested;         /* memcg has many dirty pages
                                 * backed by a congested BDI */

        struct mem_cgroup *memcg;       /* Back pointer, we cannot
                                         * use container_of */
};
struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};
enum memcg_kmem_state {
        KMEM_NONE,
        KMEM_ALLOCATED,
        KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)     struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;

        /* Legacy consumer-oriented counters */
        struct page_counter memsw;
        struct page_counter kmem;
        struct page_counter tcpmem;

        /* Upper bound of normal memory consumption range */
        unsigned long high;

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events */
        struct cgroup_file events_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when the task is moved into this
         * mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        MEMCG_PADDING(_pad1_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        /* memory.stat */
        struct mem_cgroup_stat_cpu __percpu *stat_cpu;

        MEMCG_PADDING(_pad2_);

        atomic_long_t stat[MEMCG_NR_STAT];
        atomic_long_t events[NR_VM_EVENT_ITEMS];
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifndef CONFIG_SLOB
        /* Index in the kmem_cache->memcg_params.memcg_caches array */
        int kmemcg_id;
        enum memcg_kmem_state kmem_state;
        struct list_head kmem_caches;
#endif

        int last_scanned_node;
#if MAX_NUMNODES > 1
        nodemask_t scan_nodes;
        atomic_t numainfo_events;
        atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
};
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                                                struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp,
                          bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
                              bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
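
/*
 * A minimal sketch of the two-step charge protocol, assuming a typical
 * fault-style call site (the real users live in mm/; this is illustrative,
 * not part of this header's API). Charges are reserved first and only
 * committed once the page is safely instantiated, or cancelled on failure:
 *
 *      struct mem_cgroup *memcg;
 *
 *      if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *              return VM_FAULT_OOM;
 *      ...make the page visible (page tables, page cache, LRU)...
 *      mem_cgroup_commit_charge(page, memcg, false, false);
 *
 * and if instantiation fails after a successful try_charge:
 *
 *      mem_cgroup_cancel_charge(page, memcg, false);
 */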
static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
        return memcg->nodeinfo[nid];
}
/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg node
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller is
 * disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
                                struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = node_lruvec(pgdat);
                goto out;
        }

        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);
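
/*
 * A sketch of the usual hierarchy walk built on mem_cgroup_iter(). The
 * done_early() predicate is hypothetical; the point is that an aborted
 * walk must call mem_cgroup_iter_break() so the iterator's reference on
 * the last visited memcg is dropped:
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              if (done_early(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 */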
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        if (!root->use_hierarchy)
                return false;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}
/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        struct mem_cgroup_per_node *mz;
        unsigned long nr_pages = 0;
        int zid;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for (zid = 0; zid < MAX_NR_ZONES; zid++)
                nr_pages += mz->lru_zone_size[zid][lru];
        return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                               struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_may_oom);
        current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_may_oom);
        current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
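
/*
 * Sketch of how the OOM toggles above bracket a user fault, loosely
 * modeled on the fault path in mm/memory.c (illustrative only; the
 * handle_fault() helper is an assumption, not this header's API):
 *
 *      mem_cgroup_oom_enable();
 *      ret = handle_fault(vma, address, flags);
 *      mem_cgroup_oom_disable();
 *      if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *              mem_cgroup_oom_synchronize(false);
 */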
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                             int idx)
{
        long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
        /* Unflushed per-cpu deltas can make the sum transiently negative */
        if (x < 0)
                x = 0;
#endif
        return x;
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx, int val)
{
        long x;

        if (mem_cgroup_disabled())
                return;

        /*
         * Deltas accumulate per-cpu and are only folded into the atomic
         * counter once they exceed the batch size, keeping the hot path
         * cheap.
         */
        x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                atomic_long_add(x, &memcg->stat[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->stat_cpu->count[idx], x);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}
/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *      lock_page(page) or lock_page_memcg(page)
 *      if (TestClearPageState(page))
 *              mod_memcg_page_state(page, state, -1);
 *      unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
                                          int idx, int val)
{
        if (page->mem_cgroup)
                __mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx, int val)
{
        if (page->mem_cgroup)
                mod_memcg_state(page->mem_cgroup, idx, val);
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx, int val)
{
        struct mem_cgroup_per_node *pn;
        long x;

        /* Update node */
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

        if (mem_cgroup_disabled())
                return;

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

        /* Update memcg */
        __mod_memcg_state(pn->memcg, idx, val);

        /* Update lruvec */
        x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
        if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
                atomic_long_add(x, &pn->lruvec_stat[idx]);
                x = 0;
        }
        __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!page->mem_cgroup) {
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
        __mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_page_state(page, idx, val);
        local_irq_restore(flags);
}
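
/*
 * Sketch of a typical accounting site, assuming a page-dirtying path
 * similar to the one in mm/page-writeback.c: the page's memcg binding
 * is stabilized first, then the node, memcg and lruvec counters are all
 * bumped through a single call:
 *
 *      lock_page_memcg(page);
 *      if (!TestSetPageDirty(page))
 *              mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);
 *      unlock_page_memcg(page);
 */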
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);
static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
        unsigned long x;

        if (mem_cgroup_disabled())
                return;

        x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
        if (unlikely(x > MEMCG_CHARGE_BATCH)) {
                atomic_long_add(x, &memcg->events[idx]);
                x = 0;
        }
        __this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}
static inline void count_memcg_page_event(struct page *page,
                                          enum vm_event_item idx)
{
        if (page->mem_cgroup)
                count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}
static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        atomic_long_inc(&memcg->memory_events[event]);
        cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}
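
/*
 * For example (a sketch; the real call sites are in mm/memcontrol.c and
 * the reclaim path), breaching the high limit or failing an OOM charge
 * would be reported to userspace as:
 *
 *      memcg_memory_event(memcg, MEMCG_HIGH);
 *      memcg_memory_event_mm(current->mm, MEMCG_OOM);
 */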
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT     0
#define MEM_CGROUP_ID_MAX       0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}
static inline enum mem_cgroup_protection mem_cgroup_protected(
        struct mem_cgroup *root, struct mem_cgroup *memcg)
{
        return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp,
                                        bool compound)
{
        *memcgp = NULL;
        return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
                                struct mem_cgroup *memcg)
{
        return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct pglist_data *pgdat)
{
        return &pgdat->lruvec;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}
static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                             int nid, unsigned int lru_mask)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
        return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                             int idx)
{
        return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx,
                                   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
                                          int idx,
                                          int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx,
                                        int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx, int val)
{
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                                          int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, -1);
}
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, -1);
}
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);

static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}
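
/*
 * Sketch of the intended use in networking (compare the socket memory
 * pressure helpers in include/net/sock.h; sk->sk_memcg is assumed to
 * have been set up by mem_cgroup_sk_alloc()):
 *
 *      if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *          mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *              ...treat the socket as under memory pressure...
 */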
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}
#endif
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
                            struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);
/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
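
/*
 * Sketch of a walk over a root cache's per-memcg children; the
 * cache_from_memcg_idx() accessor lives in mm/slab.h, not this header,
 * and is assumed here for illustration:
 *
 *      int i;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              struct kmem_cache *c = cache_from_memcg_idx(cachep, i);
 *
 *              if (!c)
 *                      continue;
 *              ...inspect c...
 *      }
 *      mutex_unlock(&slab_mutex);
 */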
static inline bool memcg_kmem_enabled(void)
{
        return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#endif /* _LINUX_MEMCONTROL_H */