@@ -618,6 +618,8 @@ EXPORT_SYMBOL(__get_user_pages);
  * @mm:		mm_struct of target mm
  * @address:	user address
  * @fault_flags:flags to pass down to handle_mm_fault()
+ * @unlocked:	set to true if the mmap_sem was unlocked while retrying; may
+ *		be NULL if the caller does not allow retry
  *
  * This is meant to be called in the specific scenario where for locking reasons
  * we try to access user memory in atomic context (within a pagefault_disable()
@@ -629,22 +631,28 @@ EXPORT_SYMBOL(__get_user_pages);
  * The main difference with get_user_pages() is that this function will
  * unconditionally call handle_mm_fault() which will in turn perform all the
  * necessary SW fixup of the dirty and young bits in the PTE, while
- * handle_mm_fault() only guarantees to update these in the struct page.
+ * get_user_pages() only guarantees to update these in the struct page.
  *
  * This is important for some architectures where those bits also gate the
  * access permission to the page because they are maintained in software. On
  * such architectures, gup() will not be enough to make a subsequent access
  * succeed.
  *
- * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
+ * This function will not return with an unlocked mmap_sem. So it does not
+ * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
  */
 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long address, unsigned int fault_flags)
+		     unsigned long address, unsigned int fault_flags,
+		     bool *unlocked)
 {
 	struct vm_area_struct *vma;
 	vm_flags_t vm_flags;
-	int ret;
+	int ret, major = 0;
+
+	if (unlocked)
+		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
+retry:
 	vma = find_extend_vma(mm, address);
 	if (!vma || address < vma->vm_start)
 		return -EFAULT;
@@ -654,6 +662,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 		return -EFAULT;
 
 	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -663,8 +672,19 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			return -EFAULT;
 		BUG();
 	}
+
+	if (ret & VM_FAULT_RETRY) {
+		down_read(&mm->mmap_sem);
+		if (!(fault_flags & FAULT_FLAG_TRIED)) {
+			*unlocked = true;
+			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			fault_flags |= FAULT_FLAG_TRIED;
+			goto retry;
+		}
+	}
+
 	if (tsk) {
-		if (ret & VM_FAULT_MAJOR)
+		if (major)
 			tsk->maj_flt++;
 		else
 			tsk->min_flt++;
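
A minimal caller sketch (illustrative only, not part of this patch; the function name example_fault_in is hypothetical) showing how the new unlocked out-parameter is meant to be consumed: the caller takes mm->mmap_sem for read, passes a non-NULL bool to opt into retry, and treats unlocked == true as a signal that the semaphore was dropped and re-acquired, so any state derived from the old address space layout must be revalidated:

/*
 * Illustrative caller, not part of this patch: fault in a user page for
 * write while holding mmap_sem for read, letting fixup_user_fault()
 * drop the lock and retry on its own.
 */
static int example_fault_in(struct mm_struct *mm, unsigned long addr)
{
	bool unlocked = false;
	int rc;

	down_read(&mm->mmap_sem);
	rc = fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE, &unlocked);
	if (unlocked) {
		/*
		 * mmap_sem was dropped and re-acquired inside
		 * fixup_user_fault(); cached vma pointers or anything else
		 * derived from the old mapping must be revalidated here.
		 */
	}
	up_read(&mm->mmap_sem);
	return rc;	/* 0 on success, negative errno on failure */
}

Passing NULL for unlocked keeps the old no-retry behaviour, since the patch only sets FAULT_FLAG_ALLOW_RETRY when unlocked is non-NULL.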