@@ -1340,18 +1340,17 @@ fallback:
 	return VM_FAULT_FALLBACK;
 }
 
-int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+		struct iomap_ops *ops)
 {
 	struct address_space *mapping = vma->vm_file->f_mapping;
-	unsigned long pmd_addr = address & PMD_MASK;
-	bool write = flags & FAULT_FLAG_WRITE;
+	unsigned long pmd_addr = vmf->address & PMD_MASK;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
 	struct inode *inode = mapping->host;
 	int result = VM_FAULT_FALLBACK;
 	struct iomap iomap = { 0 };
 	pgoff_t max_pgoff, pgoff;
-	struct vm_fault vmf;
 	void *entry;
 	loff_t pos;
 	int error;
@@ -1364,7 +1363,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	pgoff = linear_page_index(vma, pmd_addr);
 	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
-	trace_dax_pmd_fault(inode, vma, address, flags, pgoff, max_pgoff, 0);
+	trace_dax_pmd_fault(inode, vma, vmf, max_pgoff, 0);
 
 	/* Fall back to PTEs if we're going to COW */
 	if (write && !(vma->vm_flags & VM_SHARED))
@@ -1408,21 +1407,17 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (IS_ERR(entry))
 		goto finish_iomap;
 
-	vmf.pgoff = pgoff;
-	vmf.flags = flags;
-	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
-
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
-		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
-				&iomap, pos, write, &entry);
+		result = dax_pmd_insert_mapping(vma, vmf->pmd, vmf,
+				vmf->address, &iomap, pos, write, &entry);
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (WARN_ON_ONCE(write))
 			goto unlock_entry;
-		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
-				&entry);
+		result = dax_pmd_load_hole(vma, vmf->pmd, vmf, vmf->address,
+				&iomap, &entry);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -1448,12 +1443,11 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	}
 fallback:
 	if (result == VM_FAULT_FALLBACK) {
-		split_huge_pmd(vma, pmd, address);
+		split_huge_pmd(vma, vmf->pmd, vmf->address);
 		count_vm_event(THP_FAULT_FALLBACK);
 	}
 out:
-	trace_dax_pmd_fault_done(inode, vma, address, flags, pgoff, max_pgoff,
-			result);
+	trace_dax_pmd_fault_done(inode, vma, vmf, max_pgoff, result);
 	return result;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);