@@ -1122,7 +1122,7 @@ static void __page_check_anon_rmap(struct page *page,
 	 * over the call to page_add_new_anon_rmap.
 	 */
 	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
-	BUG_ON(page->index != linear_page_index(vma, address));
+	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
 #endif
 }
 
@@ -1152,9 +1152,28 @@ void page_add_anon_rmap(struct page *page,
 void do_page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int flags)
 {
-	int first = atomic_inc_and_test(&page->_mapcount);
+	bool compound = flags & RMAP_COMPOUND;
+	bool first;
+
+	if (PageTransCompound(page)) {
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+		if (compound) {
+			atomic_t *mapcount;
+
+			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+			mapcount = compound_mapcount_ptr(page);
+			first = atomic_inc_and_test(mapcount);
+		} else {
+			/* Anon THP always mapped first with PMD */
+			first = 0;
+			VM_BUG_ON_PAGE(!page_mapcount(page), page);
+			atomic_inc(&page->_mapcount);
+		}
+	} else {
+		first = atomic_inc_and_test(&page->_mapcount);
+	}
+
 	if (first) {
-		bool compound = flags & RMAP_COMPOUND;
 		int nr = compound ? hpage_nr_pages(page) : 1;
 		/*
 		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
@@ -1173,6 +1192,7 @@ void do_page_add_anon_rmap(struct page *page,
 		return;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+
 	/* address might be in next vma when migration races vma_adjust */
 	if (first)
 		__page_set_anon_rmap(page, vma, address,
@@ -1199,10 +1219,16 @@ void page_add_new_anon_rmap(struct page *page,
 
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	SetPageSwapBacked(page);
-	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+		/* increment count (starts at -1) */
+		atomic_set(compound_mapcount_ptr(page), 0);
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	} else {
+		/* Anon THP always mapped first with PMD */
+		VM_BUG_ON_PAGE(PageTransCompound(page), page);
+		/* increment count (starts at -1) */
+		atomic_set(&page->_mapcount, 0);
 	}
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
@@ -1232,12 +1258,15 @@ static void page_remove_file_rmap(struct page *page)
 
 	memcg = mem_cgroup_begin_page_stat(page);
 
-	/* page still mapped by someone else? */
-	if (!atomic_add_negative(-1, &page->_mapcount))
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page))) {
+		/* hugetlb pages are always mapped with pmds */
+		atomic_dec(compound_mapcount_ptr(page));
 		goto out;
+	}
 
-	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
-	if (unlikely(PageHuge(page)))
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
 		goto out;
 
 	/*
@@ -1254,6 +1283,39 @@ out:
 	mem_cgroup_end_page_stat(memcg);
 }
 
+static void page_remove_anon_compound_rmap(struct page *page)
+{
+	int i, nr;
+
+	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
+
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return;
+
+	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	if (TestClearPageDoubleMap(page)) {
+		/*
+		 * Subpages can be mapped with PTEs too. Check how many of
+		 * them are still mapped.
+		 */
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_add_negative(-1, &page[i]._mapcount))
+				nr++;
+		}
+	} else {
+		nr = HPAGE_PMD_NR;
+	}
+
+	if (nr)
+		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -1263,33 +1325,25 @@ out:
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-	int nr = compound ? hpage_nr_pages(page) : 1;
-
 	if (!PageAnon(page)) {
 		VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
 		page_remove_file_rmap(page);
 		return;
 	}
 
+	if (compound)
+		return page_remove_anon_compound_rmap(page);
+
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
 		return;
 
-	/* Hugepages are not counted in NR_ANON_PAGES for now. */
-	if (unlikely(PageHuge(page)))
-		return;
-
 	/*
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (compound) {
-		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
-	}
-
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+	__dec_zone_page_state(page, NR_ANON_PAGES);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
@@ -1710,7 +1764,7 @@ void hugepage_add_anon_rmap(struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!anon_vma);
 	/* address might be in next vma when migration races vma_adjust */
-	first = atomic_inc_and_test(&page->_mapcount);
+	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
 		__hugepage_set_anon_rmap(page, vma, address, 0);
 }
@@ -1719,7 +1773,7 @@ void hugepage_add_new_anon_rmap(struct page *page,
 			struct vm_area_struct *vma, unsigned long address)
 {
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-	atomic_set(&page->_mapcount, 0);
+	atomic_set(compound_mapcount_ptr(page), 0);
 	__hugepage_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
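
For reference, the accounting introduced above keeps two counters per THP: a compound mapcount for PMD-level mappings (parked in the first tail page) and the usual per-subpage _mapcount for PTE-level mappings. What follows is a minimal stand-alone C model of that bookkeeping, assuming a simplified model_page layout, an HPAGE_NR constant, and invented model_* helper names; only the compound_mapcount_ptr() idiom of using page[1] mirrors the kernel, none of this is the in-tree code.

/*
 * Illustrative model only: a THP's PMD mappings are counted in one
 * compound mapcount stored in the first tail page, while each subpage
 * keeps its own _mapcount for PTE mappings. Counters start at -1, as
 * in the kernel, so "first mapping" and "last unmapping" are the
 * transitions through zero.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_NR 512	/* subpages in a 2M THP on x86-64 */

struct model_page {
	atomic_int _mapcount;		/* PTE mappings of this subpage */
	atomic_int compound_mapcount;	/* meaningful only in page[1] */
};

static struct model_page thp[HPAGE_NR];

/* Mirrors the kernel trick of keeping the compound mapcount in page[1]. */
static atomic_int *compound_mapcount_ptr(struct model_page *head)
{
	return &head[1].compound_mapcount;
}

static void model_init(void)
{
	for (int i = 0; i < HPAGE_NR; i++)
		atomic_init(&thp[i]._mapcount, -1);
	atomic_init(compound_mapcount_ptr(thp), -1);
}

/* Returns true on the first PMD mapping (counter went from -1 to 0). */
static bool pmd_map(void)
{
	return atomic_fetch_add(compound_mapcount_ptr(thp), 1) == -1;
}

/* Returns true when the last PMD mapping is gone (counter back to -1). */
static bool pmd_unmap(void)
{
	return atomic_fetch_sub(compound_mapcount_ptr(thp), 1) == 0;
}

/* Map one subpage with a PTE. */
static void pte_map(int i)
{
	atomic_fetch_add(&thp[i]._mapcount, 1);
}

/* Sum both kinds of mappings, in the spirit of total_mapcount(). */
static int model_total_mapcount(void)
{
	int total = atomic_load(compound_mapcount_ptr(thp)) + 1;

	for (int i = 0; i < HPAGE_NR; i++)
		total += atomic_load(&thp[i]._mapcount) + 1;
	return total;
}

int main(void)
{
	model_init();
	assert(pmd_map());	/* whole THP mapped by a PMD */
	pte_map(0);		/* subpage 0 also mapped by a PTE */
	printf("total mapcount: %d\n", model_total_mapcount());	/* 2 */
	assert(pmd_unmap());	/* last PMD mapping dropped */
	return 0;
}

With one PMD mapping plus one PTE-mapped subpage the model reports a total mapcount of 2; that is the kind of split page_remove_anon_compound_rmap() has to reconcile when PageDoubleMap is set and the last PMD-level mapping goes away.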