/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};
enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};
struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it is incremented by the number of pages. This counter is used to trigger
 * some periodic events. This is straightforward and better than using jiffies
 * etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

	struct rb_node tree_node;		/* RB tree node */
	unsigned long usage_in_excess;		/* Set to the value by which */
						/* the soft limit is exceeded */
	bool on_tree;
	bool congested;				/* memcg has many dirty pages */
						/* backed by a congested BDI */

	struct mem_cgroup *memcg;		/* Back pointer, we cannot */
						/* use container_of */
};
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};
#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t stat[MEMCG_NR_STAT];
	atomic_long_t events[NR_VM_EVENT_ITEMS];
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: very large machines may need a bigger batch size.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);
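
/*
 * Example (illustrative sketch, not part of the original header): how a
 * reclaim loop is expected to act on mem_cgroup_protected(). MEMCG_PROT_MIN
 * is hard protection (never reclaim), MEMCG_PROT_LOW is best-effort
 * protection (skip unless reclaim would otherwise fail). The enclosing memcg
 * walk and the "low reclaim allowed" flag are assumed caller context here:
 *
 *	switch (mem_cgroup_protected(root, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;
 *	case MEMCG_PROT_LOW:
 *		if (!memcg_low_reclaim_allowed)
 *			continue;
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 */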
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, struct mem_cgroup **memcgp,
				bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
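
/*
 * Example (illustrative sketch, not part of the original header): the
 * two-phase charging protocol as a fault-handler style path would use it.
 * A charge is reserved with mem_cgroup_try_charge(), and only after the page
 * has been successfully set up is it committed; on failure the reservation
 * is returned with mem_cgroup_cancel_charge(). install_the_page() below is a
 * hypothetical placeholder for the caller's own work:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return -ENOMEM;
 *
 *	if (install_the_page(page)) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return -ENOMEM;
 *	}
 *
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	return 0;
 */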
static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 * @pgdat: pglist_data of the wanted node
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and @memcg.
 * This can be the node lruvec, if the memory controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
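
/*
 * Example (illustrative, not part of the original header): looking up the
 * lruvec of a memcg on a given node and reading one of its counters; pgdat
 * and memcg are assumed to come from the caller:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long dirty = lruvec_page_state(lruvec, NR_FILE_DIRTY);
 */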
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
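
/*
 * Example (illustrative, not part of the original header): the usual pattern
 * for walking a memcg hierarchy with mem_cgroup_iter(). A NULL reclaim
 * cookie gives a plain full walk; leaving the loop early must go through
 * mem_cgroup_iter_break() so the iterator's css reference is dropped.
 * visit() is a hypothetical placeholder for the caller's work:
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		if (visit(iter) < 0) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	} while ((iter = mem_cgroup_iter(root, iter, NULL)));
 */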
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;

	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
			       struct task_struct *p);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif
#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
					       struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG */
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
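
/*
 * Example (illustrative, not part of the original header): how networking
 * code is expected to combine the static key, the sock's memcg pointer
 * (sk->sk_memcg, set up by mem_cgroup_sk_alloc()) and the pressure check
 * before deciding to throttle; the throttling action itself is left out:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg)) {
 *		... treat socket memory as under pressure and back off ...
 *	}
 */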
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);
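
/*
 * Example (illustrative sketch, not part of the original header): the
 * check-then-charge pattern for opt-in kernel page allocations. Only
 * __GFP_ACCOUNT allocations are charged, and only while kmem accounting is
 * enabled; the surrounding allocator code is assumed context here:
 *
 *	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
 *	    unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */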
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
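
/*
 * Example (illustrative sketch, not part of the original header): walking the
 * per-memcg child caches of a root kmem_cache, with the NULL check the
 * comment above insists on. cache_from_memcg_idx() here stands for the
 * index-to-cache lookup helper from mm/slab.h and is shown only for
 * illustration; slab_mutex is likewise assumed available to the caller:
 *
 *	int idx;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(idx) {
 *		struct kmem_cache *c = cache_from_memcg_idx(root_cache, idx);
 *
 *		if (!c)
 *			continue;
 *		... operate on the per-memcg child cache ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */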
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

#else

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */