@@ -1091,6 +1091,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	unsigned flags = IOMAP_FAULT;
 	int error, major = 0;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool sync;
 	int vmf_ret = 0;
 	void *entry;
 	pfn_t pfn;
@@ -1169,6 +1170,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		goto finish_iomap;
 	}
 
+	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
 		if (iomap.flags & IOMAP_F_NEW) {
@@ -1182,12 +1185,27 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry,
 						 dax_iomap_sector(&iomap, pos),
-						 0, write);
+						 0, write && !sync);
 		if (IS_ERR(entry)) {
 			error = PTR_ERR(entry);
 			goto error_finish_iomap;
 		}
 
+		/*
+		 * If we are doing synchronous page fault and inode needs fsync,
+		 * we can insert PTE into page tables only after that happens.
+		 * Skip insertion for now and return the pfn so that caller can
+		 * insert it after fsync is done.
+		 */
+		if (sync) {
+			if (WARN_ON_ONCE(!pfnp)) {
+				error = -EIO;
+				goto error_finish_iomap;
+			}
+			*pfnp = pfn;
+			vmf_ret = VM_FAULT_NEEDDSYNC | major;
+			goto finish_iomap;
+		}
 		trace_dax_insert_mapping(inode, vmf, entry);
 		if (write)
 			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
@@ -1287,6 +1305,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	bool sync;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
 	struct inode *inode = mapping->host;
 	int result = VM_FAULT_FALLBACK;
@@ -1371,6 +1390,8 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
 		goto finish_iomap;
 
+	sync = (vma->vm_flags & VM_SYNC) && (iomap.flags & IOMAP_F_DIRTY);
+
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
@@ -1379,10 +1400,24 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 		entry = dax_insert_mapping_entry(mapping, vmf, entry,
 						dax_iomap_sector(&iomap, pos),
-						RADIX_DAX_PMD, write);
+						RADIX_DAX_PMD, write && !sync);
 		if (IS_ERR(entry))
 			goto finish_iomap;
 
+		/*
+		 * If we are doing synchronous page fault and inode needs fsync,
+		 * we can insert PMD into page tables only after that happens.
+		 * Skip insertion for now and return the pfn so that caller can
+		 * insert it after fsync is done.
+		 */
+		if (sync) {
+			if (WARN_ON_ONCE(!pfnp))
+				goto finish_iomap;
+			*pfnp = pfn;
+			result = VM_FAULT_NEEDDSYNC;
+			goto finish_iomap;
+		}
+
 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
 		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
 				write);
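
For context, the hunks above defer the page-table insertion on a MAP_SYNC mapping whose metadata still needs fsync: the fault handler stores the pfn through *pfnp and returns VM_FAULT_NEEDDSYNC so the filesystem can make the metadata durable before mapping the page. A minimal sketch of such a caller follows; example_dax_huge_fault, example_iomap_ops, and the dax_finish_sync_fault() helper are illustrative assumptions and are not part of the hunks shown here.

/*
 * Hypothetical filesystem ->huge_fault handler (sketch only). The DAX fault
 * handler above fills *pfnp and returns VM_FAULT_NEEDDSYNC instead of
 * installing the PTE/PMD; the filesystem is then expected to flush the
 * relevant metadata and insert the returned pfn itself.
 */
static int example_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	pfn_t pfn;
	int result;

	/* Run the DAX fault; for a dirty MAP_SYNC mapping it only returns the pfn. */
	result = dax_iomap_fault(vmf, pe_size, &pfn, &example_iomap_ops);

	/* Metadata still needs fsync: make it durable, then map the pfn. */
	if (result & VM_FAULT_NEEDDSYNC)
		result = dax_finish_sync_fault(vmf, pe_size, pfn);

	return result;
}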