@@ -494,15 +494,6 @@ static inline int page_count(struct page *page)
         return atomic_read(&compound_head(page)->_count);
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-extern int PageHeadHuge(struct page *page_head);
-#else /* CONFIG_HUGETLB_PAGE */
-static inline int PageHeadHuge(struct page *page_head)
-{
-        return 0;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
 static inline bool __compound_tail_refcounted(struct page *page)
 {
         return !PageSlab(page) && !PageHeadHuge(page);
@@ -571,53 +562,6 @@ static inline void init_page_count(struct page *page)
         atomic_set(&page->_count, 1);
 }
 
-/*
- * PageBuddy() indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
- * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
- * -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
- * efficiently by most CPU architectures.
- */
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-
-static inline int PageBuddy(struct page *page)
-{
-        return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBuddy(struct page *page)
-{
-        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
-        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBuddy(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageBuddy(page), page);
-        atomic_set(&page->_mapcount, -1);
-}
-
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
-        return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
-        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
-        atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
-        VM_BUG_ON_PAGE(!PageBalloon(page), page);
-        atomic_set(&page->_mapcount, -1);
-}
-
 void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
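The block removed above marks a page that is free in the buddy allocator, or held by a memory balloon, by storing a distinctive negative sentinel in page->_mapcount, a field that is otherwise -1 for an unmapped page. As the removed comment notes, the sentinel must sit well below -2 so that an accidental underflow of a real map count is not mistaken for the marker. The following is a minimal userspace sketch of that scheme, not kernel code: struct fake_page, the fake_* helpers and main() are invented for illustration, C11 atomics stand in for atomic_t, and assert() stands in for VM_BUG_ON_PAGE().

/* Userspace model of the _mapcount sentinel scheme (illustrative only). */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define FAKE_BUDDY_MAPCOUNT_VALUE   (-128)      /* same sentinel as the removed code */
#define FAKE_BALLOON_MAPCOUNT_VALUE (-256)      /* parallel marker for balloon pages */

struct fake_page {
        atomic_int _mapcount;                   /* -1 means "unused, no special state" */
};

static int fake_PageBuddy(struct fake_page *page)
{
        return atomic_load(&page->_mapcount) == FAKE_BUDDY_MAPCOUNT_VALUE;
}

static void fake_SetPageBuddy(struct fake_page *page)
{
        assert(atomic_load(&page->_mapcount) == -1);    /* like VM_BUG_ON_PAGE() */
        atomic_store(&page->_mapcount, FAKE_BUDDY_MAPCOUNT_VALUE);
}

static void fake_ClearPageBuddy(struct fake_page *page)
{
        assert(fake_PageBuddy(page));
        atomic_store(&page->_mapcount, -1);
}

int main(void)
{
        struct fake_page page;

        atomic_init(&page._mapcount, -1);
        fake_SetPageBuddy(&page);
        printf("buddy? %d\n", fake_PageBuddy(&page));   /* prints 1 */
        fake_ClearPageBuddy(&page);
        printf("buddy? %d\n", fake_PageBuddy(&page));   /* prints 0 */
        return 0;
}

PAGE_BALLOON_MAPCOUNT_VALUE plays the same role for balloon-inflated pages, with a different sentinel so the two states stay distinguishable from each other and from ordinary map counts.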
@@ -1006,26 +950,6 @@ void page_address_init(void);
 #define page_address_init()  do { } while(0)
 #endif
 
-/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
- *
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
- */
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
-
 extern struct address_space *page_mapping(struct page *page);
 
 /* Neutral page->mapping pointer to address_space or anon_vma or other */
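The comment and defines removed above describe a tagged-pointer convention: on an anonymous page, page->mapping holds an anon_vma pointer with the PAGE_MAPPING_ANON bit set in its low bits (plus PAGE_MAPPING_KSM for a merged KSM page), while on a pagecache page it is an ordinary struct address_space pointer. Below is a small userspace sketch of that convention under the same assumptions; fake_page, fake_anon_vma and fake_page_anon_vma() are invented names, and the tag test simply mirrors the PageAnon() definition removed in the next hunk.

/* Userspace model of the page->mapping low-bit tagging (illustrative only). */
#include <stdio.h>

#define FAKE_MAPPING_ANON  1UL
#define FAKE_MAPPING_KSM   2UL
#define FAKE_MAPPING_FLAGS (FAKE_MAPPING_ANON | FAKE_MAPPING_KSM)

struct fake_anon_vma { int refcount; };

struct fake_page {
        void *mapping;                  /* address_space *, or tagged anon_vma * */
};

/* Same test as the removed PageAnon(): is the low tag bit set? */
static int fake_PageAnon(const struct fake_page *page)
{
        return ((unsigned long)page->mapping & FAKE_MAPPING_ANON) != 0;
}

/* Recover the anon_vma by masking off the tag bits (hypothetical helper). */
static struct fake_anon_vma *fake_page_anon_vma(const struct fake_page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        if ((mapping & FAKE_MAPPING_FLAGS) != FAKE_MAPPING_ANON)
                return NULL;            /* pagecache page or KSM-merged page */
        return (struct fake_anon_vma *)(mapping & ~FAKE_MAPPING_FLAGS);
}

int main(void)
{
        static struct fake_anon_vma av; /* aligned, so its low two bits are clear */
        struct fake_page page;

        page.mapping = (void *)((unsigned long)&av | FAKE_MAPPING_ANON);
        printf("anon? %d  anon_vma recovered? %d\n",
               fake_PageAnon(&page), fake_page_anon_vma(&page) == &av);
        return 0;
}

Requiring ANON without KSM before handing back a pointer matches the removed comment's point that a KSM page's mapping field refers to KSM's private structure rather than to an anon_vma.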
@@ -1045,11 +969,6 @@ struct address_space *page_file_mapping(struct page *page)
         return page->mapping;
 }
 
-static inline int PageAnon(struct page *page)
-{
-        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
-}
-
 /*
  * Return the pagecache index of the passed page. Regular pagecache pages
  * use ->index whereas swapcache pages use ->private