|
@@ -2399,7 +2399,10 @@ EXPORT_SYMBOL(unmap_mapping_range);
|
|
/*
|
|
/*
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
- * We return with mmap_sem still held, but pte unmapped and unlocked.
|
|
|
|
|
|
+ * We return with pte unmapped and unlocked.
|
|
|
|
+ *
|
|
|
|
+ * We return with the mmap_sem locked or unlocked in the same cases
|
|
|
|
+ * as does filemap_fault().
|
|
*/
|
|
*/
|
|
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
@@ -2688,6 +2691,11 @@ oom:
|
|
return VM_FAULT_OOM;
|
|
return VM_FAULT_OOM;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * The mmap_sem must have been held on entry, and may have been
|
|
|
|
+ * released depending on flags and vma->vm_ops->fault() return value.
|
|
|
|
+ * See filemap_fault() and __lock_page_or_retry().
|
|
|
|
+ */
|
|
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
|
|
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
|
|
pgoff_t pgoff, unsigned int flags, struct page **page)
|
|
pgoff_t pgoff, unsigned int flags, struct page **page)
|
|
{
|
|
{
|
|
@@ -3016,6 +3024,12 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
return ret;
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
|
|
+ * but allow concurrent faults).
|
|
|
|
+ * The mmap_sem may have been released depending on flags and our
|
|
|
|
+ * return value. See filemap_fault() and __lock_page_or_retry().
|
|
|
|
+ */
|
|
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
unsigned int flags, pte_t orig_pte)
|
|
unsigned int flags, pte_t orig_pte)
|
|
@@ -3040,7 +3054,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
*
|
|
*
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
- * We return with mmap_sem still held, but pte unmapped and unlocked.
|
|
|
|
|
|
+ * We return with pte unmapped and unlocked.
|
|
|
|
+ * The mmap_sem may have been released depending on flags and our
|
|
|
|
+ * return value. See filemap_fault() and __lock_page_or_retry().
|
|
*/
|
|
*/
|
|
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
@@ -3172,7 +3188,10 @@ out:
|
|
*
|
|
*
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
* but allow concurrent faults), and pte mapped but not yet locked.
|
|
- * We return with mmap_sem still held, but pte unmapped and unlocked.
|
|
|
|
|
|
+ * We return with pte unmapped and unlocked.
|
|
|
|
+ *
|
|
|
|
+ * The mmap_sem may have been released depending on flags and our
|
|
|
|
+ * return value. See filemap_fault() and __lock_page_or_retry().
|
|
*/
|
|
*/
|
|
static int handle_pte_fault(struct mm_struct *mm,
|
|
static int handle_pte_fault(struct mm_struct *mm,
|
|
struct vm_area_struct *vma, unsigned long address,
|
|
struct vm_area_struct *vma, unsigned long address,
|
|
@@ -3232,6 +3251,9 @@ unlock:
|
|
|
|
|
|
/*
|
|
/*
|
|
* By the time we get here, we already hold the mm semaphore
|
|
* By the time we get here, we already hold the mm semaphore
|
|
|
|
+ *
|
|
|
|
+ * The mmap_sem may have been released depending on flags and our
|
|
|
|
+ * return value. See filemap_fault() and __lock_page_or_retry().
|
|
*/
|
|
*/
|
|
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long address, unsigned int flags)
|
|
unsigned long address, unsigned int flags)
|
|
@@ -3313,6 +3335,12 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
|
|
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * By the time we get here, we already hold the mm semaphore
|
|
|
|
+ *
|
|
|
|
+ * The mmap_sem may have been released depending on flags and our
|
|
|
|
+ * return value. See filemap_fault() and __lock_page_or_retry().
|
|
|
|
+ */
|
|
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long address, unsigned int flags)
|
|
unsigned long address, unsigned int flags)
|
|
{
|
|
{
|