@@ -289,6 +289,47 @@ PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space,
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * page->mapping then points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to the user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+
+static inline int PageAnon(struct page *page)
+{
+        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+#ifdef CONFIG_KSM
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
+ */
+static inline int PageKsm(struct page *page)
+{
+        return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+                        (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+#else
+TESTPAGEFLAG_FALSE(Ksm)
+#endif
+
u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
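[Note: the PAGE_MAPPING_* bits occupy the two low-order bits of page->mapping, which are always clear in a properly aligned pointer, so the real pointer can be recovered by masking them off. A minimal sketch of that decoding, assuming only the definitions added above (the helper name here is illustrative; the kernel's own version of this logic lives in page_anon_vma() in mm/util.c):

struct anon_vma;

/* Illustrative sketch: recover the anon_vma hidden behind the
 * PAGE_MAPPING_* tag bits, or NULL if this is not a plain anon page
 * (a KSM page points at a stable-tree node instead).
 */
static inline struct anon_vma *sketch_page_anon_vma(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
]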
@@ -426,6 +467,14 @@ static inline void ClearPageCompound(struct page *page)

#endif /* !PAGEFLAGS_EXTENDED */

+#ifdef CONFIG_HUGETLB_PAGE
+int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page);
+#else
+TESTPAGEFLAG_FALSE(Huge)
+TESTPAGEFLAG_FALSE(HeadHuge)
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
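[Note: when CONFIG_HUGETLB_PAGE is not set, the TESTPAGEFLAG_FALSE() stubs give callers a constant-false predicate so hugetlb-only paths compile away. Roughly, the expansion looks like this (a sketch based on the macro defined earlier in this header, not a verbatim expansion):

/* Approximate expansion of TESTPAGEFLAG_FALSE(Huge): an always-false
 * predicate, so "if (PageHuge(page)) ..." blocks are eliminated at
 * compile time in !CONFIG_HUGETLB_PAGE builds.
 */
static inline int PageHuge(const struct page *page)
{
        return 0;
}
]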
@@ -479,6 +528,53 @@ static inline int PageTransTail(struct page *page)
}
#endif

+/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2, but better not too close to
+ * -2, so that an underflow of page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
+static inline int PageBuddy(struct page *page)
+{
+        return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+        VM_BUG_ON_PAGE(!PageBuddy(page), page);
+        atomic_set(&page->_mapcount, -1);
+}
+
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+        return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+        atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+        VM_BUG_ON_PAGE(!PageBalloon(page), page);
+        atomic_set(&page->_mapcount, -1);
+}
+
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
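[Note: because _mapcount is -1 for a page with no mappings, these helpers can borrow the field for sentinel values that no legitimate mapcount can reach; -128 and -256 sit far from -1 and from each other, so an underflow near -1 is never misread as either state. A hedged sketch of the intended state transitions (the function names below are illustrative, not the real mm/page_alloc.c entry points):

/* Illustrative only: lifecycle of the _mapcount sentinel encodings. */
static void sketch_free_to_buddy(struct page *page)
{
        __SetPageBuddy(page);           /* _mapcount: -1 -> -128 */
        BUG_ON(!PageBuddy(page));
        BUG_ON(PageBalloon(page));      /* -128 != -256 */
}

static void sketch_alloc_from_buddy(struct page *page)
{
        __ClearPageBuddy(page);         /* _mapcount back to -1 */
        BUG_ON(PageBuddy(page));
}
]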