@@ -1412,9 +1412,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
+	return stack_guard_page_start(vma, addr) ||
+	       stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
@@ -1551,12 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		/*
-		 * For mlock, just skip the stack guard page.
-		 */
-		if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start))
-			goto next_page;
-
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
|
|
|
@@ -1573,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
|
int ret;
|
|
|
unsigned int fault_flags = 0;
|
|
|
|
|
|
+ /* For mlock, just skip the stack guard page. */
|
|
|
+ if (foll_flags & FOLL_MLOCK) {
|
|
|
+ if (stack_guard_page(vma, start))
|
|
|
+ goto next_page;
|
|
|
+ }
|
|
|
if (foll_flags & FOLL_WRITE)
|
|
|
fault_flags |= FAULT_FLAG_WRITE;
|
|
|
if (nonblocking)
|