@@ -63,6 +63,7 @@
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
+#include <linux/memcontrol.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -1018,6 +1019,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 	if (PageMappingFlags(page))
 		page->mapping = NULL;
+	if (memcg_kmem_enabled() && PageKmemcg(page)) {
+		memcg_kmem_uncharge(page, order);
+		__ClearPageKmemcg(page);
+	}
 	if (check_free)
 		bad += free_pages_check(page);
 	if (bad)
@@ -3841,6 +3846,14 @@ no_zone:
 	}
 
 out:
+	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
+		if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
+			__free_pages(page, order);
+			page = NULL;
+		} else
+			__SetPageKmemcg(page);
+	}
+
 	if (kmemcheck_enabled && page)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 
@@ -3996,59 +4009,6 @@ void __free_page_frag(void *addr)
 }
 EXPORT_SYMBOL(__free_page_frag);
 
-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages(gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp_mask, order);
-	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) &&
-	    page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
-		__free_pages(page, order);
-		page = NULL;
-	}
-	return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
-	if (memcg_kmem_enabled())
-		memcg_kmem_uncharge(page, order);
-	__free_pages(page, order);
-}
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
-	if (addr != 0) {
-		VM_BUG_ON(!virt_addr_valid((void *)addr));
-		__free_kmem_pages(virt_to_page((void *)addr), order);
-	}
-}
-
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 			      size_t size)
 {
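
Note for callers: with charging and uncharging folded into the generic
allocator paths above, the removed alloc_kmem_pages()/free_kmem_pages()
helpers have no remaining role; plain alloc_pages() with __GFP_ACCOUNT now
does the accounting. A minimal sketch of the call pattern a converted caller
would use (the GFP flags and surrounding error handling are illustrative,
not taken from this patch):

	struct page *page;

	/* Allocation: __GFP_ACCOUNT tells the generic allocator to charge
	 * the current memcg and mark the page PageKmemcg on success.
	 */
	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
	if (!page)
		return NULL;

	...

	/* Free: free_pages_prepare() uncharges PageKmemcg pages itself. */
	__free_pages(page, order);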