@@ -188,31 +188,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
 
-/*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma. If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
 /*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				  struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }