@@ -587,11 +587,6 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 		error = PTR_ERR(dax.addr);
 		goto out;
 	}
-
-	if (buffer_unwritten(bh) || buffer_new(bh)) {
-		clear_pmem(dax.addr, PAGE_SIZE);
-		wmb_pmem();
-	}
 	dax_unmap_atomic(bdev, &dax);
 
 	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
@@ -722,7 +717,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	}
 
 	/* Filesystem should not return unwritten buffers to us! */
-	WARN_ON_ONCE(buffer_unwritten(&bh));
+	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
 	error = dax_insert_mapping(inode, &bh, vma, vmf);
 
  out:
@@ -854,7 +849,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if (get_block(inode, block, &bh, 1) != 0)
 			return VM_FAULT_SIGBUS;
 		alloc = true;
-		WARN_ON_ONCE(buffer_unwritten(&bh));
+		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
 	}
 
 	bdev = bh.b_bdev;
@@ -953,14 +948,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 			dax_pmd_dbg(&bh, address, "pfn not in memmap");
 			goto fallback;
 		}
-
-		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-			clear_pmem(dax.addr, PMD_SIZE);
-			wmb_pmem();
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			result |= VM_FAULT_MAJOR;
-		}
 		dax_unmap_atomic(bdev, &dax);
 
 		/*
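
For context, here is a minimal user-space sketch (not kernel code) of the invariant the two strengthened WARN_ON_ONCE() calls enforce: once the fault paths stop zeroing blocks themselves via clear_pmem()/wmb_pmem(), a buffer that is still marked unwritten or new when get_block() returns is treated as a filesystem bug. The struct and helper names below are simplified stand-ins for the kernel's buffer_head flags, chosen only for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for the buffer state the fault path inspects;
 * the real code uses buffer_unwritten()/buffer_new() on a buffer_head. */
struct bh_state {
	bool unwritten;	/* extent allocated but not yet zeroed/written */
	bool new;	/* block freshly allocated by get_block() */
};

/* Mirrors the strengthened check: either flag set means the filesystem
 * handed back a block that the removed clear_pmem() path would have had
 * to zero, which the fault handlers no longer do. */
static bool violates_dax_invariant(const struct bh_state *bh)
{
	return bh->unwritten || bh->new;
}

int main(void)
{
	struct bh_state zeroed_by_fs = { .unwritten = false, .new = false };
	struct bh_state fresh_alloc  = { .unwritten = false, .new = true  };

	printf("pre-zeroed buffer violates invariant: %d\n",
	       violates_dax_invariant(&zeroed_by_fs));
	printf("new buffer violates invariant:        %d\n",
	       violates_dax_invariant(&fresh_alloc));
	return 0;
}

The design consequence, as far as the diff shows, is that whichever path allocates a block is expected to return it already zeroed, so the PTE and PMD fault handlers can map it directly instead of clearing it under the mapping.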