/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
 * these two lists should be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};
#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could be used, but having a rule avoids ambiguity: a charge
 * function's gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);

/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
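
/*
 * Illustrative sketch (not part of this header): charging a new page-cache
 * page while following the gfp_mask convention described above, roughly the
 * way a page-cache insertion path would do it.  Error handling is elided and
 * the surrounding code is an assumption used only for illustration:
 *
 *      error = mem_cgroup_cache_charge(page, current->mm,
 *                                      gfp_mask & GFP_RECLAIM_MASK);
 *      if (error)
 *              return error;
 *      ... insert page into the mapping ...
 */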
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
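
/*
 * Illustrative sketch (not part of this header): batching uncharges when
 * releasing many pages at once, so per-page uncharge bookkeeping is
 * coalesced.  This is an assumed usage pattern, not a definitive call site:
 *
 *      mem_cgroup_uncharge_start();
 *      list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *              mem_cgroup_uncharge_cache_page(page);
 *      mem_cgroup_uncharge_end();
 */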
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
                        const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
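
/*
 * Illustrative sketch (not part of this header): walking a memcg hierarchy
 * with mem_cgroup_iter(), and bailing out early with mem_cgroup_iter_break()
 * so the reference held on the last visited group is dropped:
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL);
 *           iter != NULL;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              if (should_stop(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 *
 * should_stop() is a hypothetical predicate used only for illustration.
 */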
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                          struct page *newpage);
static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
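
/*
 * Illustrative sketch (not part of this header): how a fault path can enable
 * memcg OOM handling around the actual fault work and then synchronize the
 * OOM state afterwards.  This mirrors the pattern used around user page
 * faults, but the surrounding code here is an assumption for illustration:
 *
 *      mem_cgroup_oom_enable();
 *      ret = do_the_fault_work(mm, vma, address, flags);
 *      mem_cgroup_oom_disable();
 *      if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *              mem_cgroup_oom_synchronize(false);
 *
 * do_the_fault_work() is a hypothetical stand-in for the real fault handler.
 */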
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (memory_cgrp_subsys.disabled)
                return true;
        return false;
}
void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                       unsigned long *flags);

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_stat_index idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}
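
/*
 * Illustrative sketch (not part of this header): updating a per-memcg page
 * statistic inside the begin/end pair, so the update cannot race with the
 * page moving between cgroups.  FILE_MAPPED is used as an example of the
 * kind of statistic maintained this way; the surrounding code is assumed:
 *
 *      bool locked;
 *      unsigned long flags;
 *
 *      mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *      mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *      mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */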
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                                   struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                           int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                                 struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */
#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};
struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}

static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check whether the cache is valid (it is either valid or NULL), and
 * slab_mutex must be held while looping over these caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}
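
/*
 * Illustrative sketch (not part of this header): iterating the per-memcg
 * children of a root cache s.  lookup_memcg_cache() is a hypothetical
 * stand-in for the slab-internal per-index lookup, and slab_mutex is held
 * as the comment above requires:
 *
 *      int i;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              struct kmem_cache *c = lookup_memcg_cache(s, i);
 *              if (!c)
 *                      continue;
 *              ... operate on the child cache ...
 *      }
 *      mutex_unlock(&slab_mutex);
 */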
/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly.  If it is on,
 * we'll still do most of the flag checking inline.  We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                 int order);
void __memcg_kmem_commit_charge(struct page *page,
                                struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
                             struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);
void memcg_register_cache(struct kmem_cache *s);
void memcg_unregister_cache(struct kmem_cache *s);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and won't be worth the trouble.
         */
        if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}
/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}
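
/*
 * Illustrative sketch (not part of this header): the charge/commit/uncharge
 * protocol as a kmem page allocation path might use it.  Charge before
 * allocating, commit (or revert, if the allocation failed) afterwards, and
 * uncharge when the pages are freed.  The surrounding code is an assumption
 * shown only to make the ordering explicit; gfp is assumed to carry
 * __GFP_KMEMCG so the charge is actually attempted:
 *
 *      struct mem_cgroup *memcg = NULL;
 *      struct page *page;
 *
 *      if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *              return NULL;
 *      page = alloc_pages(gfp, order);
 *      memcg_kmem_commit_charge(page, memcg, order);
 *
 * and later, on the free path:
 *
 *      memcg_kmem_uncharge_pages(page, order);
 */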
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the allocating task, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process.  Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep, gfp);
}
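
/*
 * Illustrative sketch (not part of this header): an allocation entry point
 * redirecting to the current task's per-memcg cache before carrying out the
 * allocation.  The wrapper is an assumption used only for illustration; in
 * practice this substitution happens inside the slab allocator itself:
 *
 *      void *example_alloc(struct kmem_cache *cachep, gfp_t flags)
 *      {
 *              cachep = memcg_kmem_get_cache(cachep, flags);
 *              return kmem_cache_alloc(cachep, flags);
 *      }
 */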
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
                struct kmem_cache *s, struct kmem_cache *root_cache)
{
        return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}

static inline void memcg_register_cache(struct kmem_cache *s)
{
}

static inline void memcg_unregister_cache(struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */