@@ -1009,7 +1009,6 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	unsigned long vaddr = vmf->address;
 	int ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
-	void *entry2;
 	pfn_t pfn;
 
 	zero_page = ZERO_PAGE(0);
@@ -1019,13 +1018,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	}
 
 	pfn = page_to_pfn_t(zero_page);
-	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
-			RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(entry2)) {
-		ret = VM_FAULT_SIGBUS;
-		goto out;
-	}
-
+	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
+			false);
 	vm_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
@@ -1337,10 +1331,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);
-		if (IS_ERR(entry)) {
-			error = PTR_ERR(entry);
-			goto error_finish_iomap;
-		}
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,
@@ -1424,8 +1414,6 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	pfn = page_to_pfn_t(zero_page);
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
-	if (IS_ERR(ret))
-		goto fallback;
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
@@ -1547,8 +1535,6 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);
-		if (IS_ERR(entry))
-			goto finish_iomap;
 
 		/*
 		 * If we are doing synchronous page fault and inode needs fsync,