@@ -63,6 +63,7 @@
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/dax.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -2785,7 +2786,8 @@ oom:
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 			pgoff_t pgoff, unsigned int flags,
-			struct page *cow_page, struct page **page)
+			struct page *cow_page, struct page **page,
+			void **entry)
 {
 	struct vm_fault vmf;
 	int ret;
@@ -2800,8 +2802,10 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (!vmf.page)
-		goto out;
+	if (ret & VM_FAULT_DAX_LOCKED) {
+		*entry = vmf.entry;
+		return ret;
+	}
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
@@ -2815,7 +2819,6 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
- out:
 	*page = vmf.page;
 	return ret;
 }
@@ -2987,7 +2990,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pte_unmap_unlock(pte, ptl);
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3010,6 +3013,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
 	struct page *fault_page, *new_page;
+	void *fault_entry;
 	struct mem_cgroup *memcg;
 	spinlock_t *ptl;
 	pte_t *pte;
@@ -3027,26 +3031,24 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page,
+			 &fault_entry);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
-	if (fault_page)
+	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(new_page, fault_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
-		if (fault_page) {
+		if (!(ret & VM_FAULT_DAX_LOCKED)) {
 			unlock_page(fault_page);
 			put_page(fault_page);
 		} else {
-			/*
-			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for read to protect against truncate.
-			 */
-			i_mmap_unlock_read(vma->vm_file->f_mapping);
+			dax_unlock_mapping_entry(vma->vm_file->f_mapping,
+						 pgoff);
 		}
 		goto uncharge_out;
 	}
@@ -3054,15 +3056,11 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
-	if (fault_page) {
+	if (!(ret & VM_FAULT_DAX_LOCKED)) {
 		unlock_page(fault_page);
 		put_page(fault_page);
 	} else {
-		/*
-		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for read to protect against truncate.
-		 */
-		i_mmap_unlock_read(vma->vm_file->f_mapping);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
 	}
 	return ret;
 uncharge_out:
@@ -3082,7 +3080,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 