
mm: differentiate page_mapped() from page_mapcount() for compound pages

Let's define page_mapped() to be true for compound pages if any
sub-page of the compound page is mapped (with PMD or PTE).

On the other hand, page_mapcount() returns the mapcount for this
particular small page.

This will make cases like page_get_anon_vma() behave correctly once we
allow huge pages to be mapped with PTE.

Most users outside core-mm should use page_mapcount() instead of
page_mapped().
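
To illustrate the distinction with a hypothetical helper (not part of this
patch; it merely mirrors the MIPS-style pattern changed below): code that
handles D-cache aliasing only cares whether this exact small page can be seen
through a user mapping, so it should test page_mapcount(), while "is this page
mapped at all" questions, as in page_get_anon_vma(), keep using page_mapped().

	/* Hypothetical helper, for illustration only. */
	static void *map_for_kernel_copy(struct page *page, unsigned long uaddr)
	{
		/*
		 * Only this particular small page matters for aliasing: if it
		 * has no user mapping, a plain kernel mapping is safe even
		 * when other sub-pages of the same compound page are mapped.
		 */
		if (cpu_has_dc_aliases && page_mapcount(page) &&
		    !Page_dcache_dirty(page))
			return kmap_coherent(page, uaddr);
		return kmap_atomic(page);
	}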

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Kirill A. Shutemov authored 9 years ago
commit e1534ae950
11 changed files with 31 additions and 19 deletions
  1. arch/arc/mm/cache.c      (+2, -2)
  2. arch/arm/mm/flush.c      (+1, -1)
  3. arch/mips/mm/c-r4k.c     (+2, -1)
  4. arch/mips/mm/cache.c     (+1, -1)
  5. arch/mips/mm/init.c      (+3, -3)
  6. arch/sh/mm/cache-sh4.c   (+1, -1)
  7. arch/sh/mm/cache.c       (+4, -4)
  8. arch/xtensa/mm/tlb.c     (+1, -1)
  9. fs/proc/page.c           (+2, -2)
  10. include/linux/mm.h      (+13, -2)
  11. mm/filemap.c            (+1, -1)

+ 2 - 2
arch/arc/mm/cache.c

@@ -617,7 +617,7 @@ void flush_dcache_page(struct page *page)
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
-	} else if (page_mapped(page)) {
+	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
@@ -857,7 +857,7 @@ void copy_user_highpage(struct page *to, struct page *from,
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
-	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

+ 1 - 1
arch/arm/mm/flush.c

@@ -330,7 +330,7 @@ void flush_dcache_page(struct page *page)
	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
-	    mapping && !page_mapped(page))
+	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);

+ 2 - 1
arch/mips/mm/c-r4k.c

@@ -587,7 +587,8 @@ static inline void local_r4k_flush_cache_page(void *args)
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
-				page_mapped(page) && !Page_dcache_dirty(page));
+				page_mapcount(page) &&
+				!Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else

+ 1 - 1
arch/mips/mm/cache.c

@@ -106,7 +106,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
-		if (page_mapped(page) && !Page_dcache_dirty(page)) {
+		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);

+ 3 - 3
arch/mips/mm/init.c

@@ -165,7 +165,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 
	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
-	    page_mapped(from) && !Page_dcache_dirty(from)) {
+	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
@@ -187,7 +187,7 @@ void copy_to_user_page(struct vm_area_struct *vma,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
-	    page_mapped(page) && !Page_dcache_dirty(page)) {
+	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
@@ -205,7 +205,7 @@ void copy_from_user_page(struct vm_area_struct *vma,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
-	    page_mapped(page) && !Page_dcache_dirty(page)) {
+	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();

+ 1 - 1
arch/sh/mm/cache-sh4.c

@@ -241,7 +241,7 @@ static void sh4_flush_cache_page(void *args)
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
-			page_mapped(page));
+			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else

+ 4 - 4
arch/sh/mm/cache.c

@@ -59,7 +59,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
-	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
@@ -78,7 +78,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
-	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
@@ -97,7 +97,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 
	vto = kmap_atomic(to);

-	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+	if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
-		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;


+ 1 - 1
arch/xtensa/mm/tlb.c

@@ -245,7 +245,7 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
-				else if (page_mapped(p))
+				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;

+ 2 - 2
fs/proc/page.c

@@ -103,9 +103,9 @@ u64 stable_page_flags(struct page *page)
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
-	 * simple test in page_mapped() is not enough.
+	 * simple test in page_mapcount() is not enough.
	 */
-	if (!PageSlab(page) && page_mapped(page))
+	if (!PageSlab(page) && page_mapcount(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;

+ 13 - 2
include/linux/mm.h

@@ -953,10 +953,21 @@ static inline pgoff_t page_file_index(struct page *page)
 
 /*
  * Return true if this page is mapped into pagetables.
+ * For compound page it returns true if any subpage of compound page is mapped.
  */
-static inline int page_mapped(struct page *page)
+static inline bool page_mapped(struct page *page)
 {
-	return atomic_read(&(page)->_mapcount) + compound_mapcount(page) >= 0;
+	int i;
+	if (likely(!PageCompound(page)))
+		return atomic_read(&page->_mapcount) >= 0;
+	page = compound_head(page);
+	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
+		return true;
+	for (i = 0; i < hpage_nr_pages(page); i++) {
+		if (atomic_read(&page[i]._mapcount) >= 0)
+			return true;
+	}
+	return false;
 }

 /*
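
The new page_mapped() relies on the usual convention that page->_mapcount is
initialised to -1, so "atomic_read(...) >= 0" means "mapped at least once": it
checks the compound mapcount (the PMD mapping) first, then every sub-page's own
mapcount (PTE mappings). A minimal sketch of the resulting semantics, assuming
a hypothetical state where the PMD mapping of a THP is gone and only sub-page 3
is still mapped by a single PTE (helper name made up, not part of the patch):

	/* Hypothetical check, for illustration only. */
	static void check_partially_mapped_thp(struct page *head)
	{
		VM_BUG_ON_PAGE(!page_mapped(head), head);	/* the loop finds head[3] */
		VM_BUG_ON_PAGE(page_mapcount(head), head);	/* this small page is unmapped */
		VM_BUG_ON_PAGE(!page_mapcount(&head[3]), head);	/* only head[3] still is */
	}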

+ 1 - 1
mm/filemap.c

@@ -204,7 +204,7 @@ void __delete_from_page_cache(struct page *page, void *shadow,
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
-	BUG_ON(page_mapped(page));
+	VM_BUG_ON_PAGE(page_mapped(page), page);

	/*
	 * At this point page must be either written or cleaned by truncate.