@@ -1133,6 +1133,7 @@ static void __page_check_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @compound:	charge the page as compound or small page
  *
  * The caller needs to hold the pte lock, and the page must be locked in
  * the anon_vma case: to serialize mapping,index checking after setting,
@@ -1140,9 +1141,9 @@ static void __page_check_anon_rmap(struct page *page,
  * (but PageKsm is never downgraded to PageAnon).
  */
 void page_add_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-	do_page_add_anon_rmap(page, vma, address, 0);
+	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
 }

 /*
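
For illustration only: the new bool makes call sites state the mapping granularity explicitly. A minimal sketch of hypothetical callers (none of these sites are part of this excerpt, and the usual pte-lock/page-lock context is assumed):

	/* illustrative, not from this patch */
	page_add_anon_rmap(page, vma, address, false);	/* small page mapped by a pte */
	page_add_anon_rmap(hpage, vma, haddr, true);	/* THP mapped by a pmd */
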
@@ -1151,21 +1152,24 @@ void page_add_anon_rmap(struct page *page,
  * Everybody else should continue to use page_add_anon_rmap above.
  */
 void do_page_add_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
+	struct vm_area_struct *vma, unsigned long address, int flags)
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
 	if (first) {
+		bool compound = flags & RMAP_COMPOUND;
+		int nr = compound ? hpage_nr_pages(page) : 1;
 		/*
 		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 		 * these counters are not modified in interrupt context, and
 		 * pte lock(a spinlock) is held, which implies preemption
 		 * disabled.
 		 */
-		if (PageTransHuge(page))
+		if (compound) {
+			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 			__inc_zone_page_state(page,
 					NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				hpage_nr_pages(page));
+		}
+		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
 	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -1173,7 +1177,8 @@ void do_page_add_anon_rmap(struct page *page,
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	/* address might be in next vma when migration races vma_adjust */
 	if (first)
-		__page_set_anon_rmap(page, vma, address, exclusive);
+		__page_set_anon_rmap(page, vma, address,
+				flags & RMAP_EXCLUSIVE);
 	else
 		__page_check_anon_rmap(page, vma, address);
 }
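
The old `exclusive` int becomes a `flags` bitmask carrying both the exclusivity and the compound bits. The matching include/linux/rmap.h hunk is not part of this excerpt; it is assumed to define the two flags along these lines:

	/* assumed rmap.h counterpart, not shown in this excerpt */
	/* bitflags for do_page_add_anon_rmap() */
	#define RMAP_EXCLUSIVE	0x01	/* page is exclusive to this process */
	#define RMAP_COMPOUND	0x02	/* charge the page as compound (huge) */
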
@@ -1183,21 +1188,25 @@ void do_page_add_anon_rmap(struct page *page,
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
+ * @compound:	charge the page as compound or small page
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
  * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
+	int nr = compound ? hpage_nr_pages(page) : 1;
+
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-	if (PageTransHuge(page))
+	if (compound) {
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-			hpage_nr_pages(page));
+	}
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
 }

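
Again purely illustrative: a hypothetical anonymous-fault caller under the new signature (the surrounding fault-handler code is assumed, not taken from this patch):

	/* illustrative, not from this patch */
	page_add_new_anon_rmap(page, vma, address, false);	/* fresh small page */
	page_add_new_anon_rmap(new_page, vma, haddr, true);	/* fresh pmd-mapped THP */
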
@@ -1249,13 +1258,17 @@ out:

 /**
  * page_remove_rmap - take down pte mapping from a page
- * @page: page to remove mapping from
+ * @page:	page to remove mapping from
+ * @compound:	uncharge the page as compound or small page
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page, bool compound)
 {
+	int nr = compound ? hpage_nr_pages(page) : 1;
+
 	if (!PageAnon(page)) {
+		VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
 		page_remove_file_rmap(page);
 		return;
 	}
@@ -1273,11 +1286,12 @@ void page_remove_rmap(struct page *page)
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (PageTransHuge(page))
+	if (compound) {
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	}

-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-			      -hpage_nr_pages(page));
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);

 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
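
The unmap side is symmetric: a page must be uncharged at the same granularity it was charged with. A hypothetical zap-path caller (context assumed, not part of this excerpt):

	/* illustrative, not from this patch */
	page_remove_rmap(page, false);	/* pte-mapped small page */
	page_remove_rmap(hpage, true);	/* pmd-mapped THP */
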
@@ -1416,7 +1430,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	} else
 		dec_mm_counter(mm, mm_counter_file(page));

-	page_remove_rmap(page);
+	page_remove_rmap(page, PageHuge(page));
 	page_cache_release(page);

 out_unmap:
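
Note on the hunk above: try_to_unmap_one() unmaps at pte granularity, and a THP is expected to be split before it reaches this point, so presumably the only compound pages that can arrive here are hugetlb pages; hence the argument is derived from PageHuge(page) rather than PageTransHuge(page), matching the VM_BUG_ON_PAGE(compound && !PageHuge(page), page) check in page_remove_rmap().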