@@ -196,7 +196,9 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
 
-	__dec_zone_page_state(page, NR_FILE_PAGES);
+	/* hugetlb pages do not participate in page cache accounting. */
+	if (!PageHuge(page))
+		__dec_zone_page_state(page, NR_FILE_PAGES);
 	if (PageSwapBacked(page))
 		__dec_zone_page_state(page, NR_SHMEM);
 	BUG_ON(page_mapped(page));
@@ -483,7 +485,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		error = radix_tree_insert(&mapping->page_tree, offset, new);
 		BUG_ON(error);
 		mapping->nrpages++;
-		__inc_zone_page_state(new, NR_FILE_PAGES);
+
+		/*
+		 * hugetlb pages do not participate in page cache accounting.
+		 */
+		if (!PageHuge(new))
+			__inc_zone_page_state(new, NR_FILE_PAGES);
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
@@ -575,7 +582,10 @@ static int __add_to_page_cache_locked(struct page *page,
 	radix_tree_preload_end();
 	if (unlikely(error))
 		goto err_insert;
-	__inc_zone_page_state(page, NR_FILE_PAGES);
+
+	/* hugetlb pages do not participate in page cache accounting. */
+	if (!huge)
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false);
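
All three hunks enforce the same invariant: hugetlb pages are never counted in NR_FILE_PAGES, neither on the insert paths nor on the delete path. The standalone sketch below models that invariant outside the kernel; the struct, counter, and helper names are hypothetical stand-ins, not kernel APIs. It shows why the guard must appear on both sides: counting on only one path would leave the counter permanently skewed.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel structures; not kernel code. */
	struct page {
		bool huge;			/* models PageHuge(page) */
	};

	static long nr_file_pages;		/* models the NR_FILE_PAGES counter */

	static void cache_add(const struct page *page)
	{
		/* hugetlb pages do not participate in page cache accounting. */
		if (!page->huge)
			nr_file_pages++;
	}

	static void cache_delete(const struct page *page)
	{
		/*
		 * The delete path needs the same guard, or a huge page that
		 * was never counted on insert still gets decremented here.
		 */
		if (!page->huge)
			nr_file_pages--;
	}

	int main(void)
	{
		struct page regular = { .huge = false };
		struct page huge = { .huge = true };

		cache_add(&regular);
		cache_add(&huge);		/* skipped: not counted */
		cache_delete(&huge);		/* skipped: not uncounted */
		cache_delete(&regular);

		/*
		 * Balanced at 0. With the guard on the add paths only, the
		 * same sequence would print -1, which is the skew the patch
		 * removes.
		 */
		printf("NR_FILE_PAGES model: %ld\n", nr_file_pages);
		return 0;
	}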