@@ -1572,11 +1572,48 @@ xfs_filemap_pmd_fault(
 	return ret;
 }
 
+/*
+ * pfn_mkwrite was originally intended to ensure we capture time stamp
+ * updates on write faults. In reality, it's needed to serialise against
+ * truncate similar to page_mkwrite. Hence we open-code dax_pfn_mkwrite()
+ * here and cycle the XFS_MMAPLOCK_SHARED to ensure the fault serialises
+ * against truncate with the barrier in place.
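+ * Note that for DAX pfn mappings there is no struct page to lock, so the
+ * MMAPLOCK is the only serialisation we have against a racing truncate.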
+ */
+static int
+xfs_filemap_pfn_mkwrite(
+	struct vm_area_struct	*vma,
+	struct vm_fault		*vmf)
+{
+	struct inode		*inode = file_inode(vma->vm_file);
+	struct xfs_inode	*ip = XFS_I(inode);
+	int			ret = VM_FAULT_NOPAGE;
+	loff_t			size;
+
+	trace_xfs_filemap_pfn_mkwrite(ip);
+
+	sb_start_pagefault(inode->i_sb);
+	file_update_time(vma->vm_file);
+
+	/* check if the faulting page hasn't raced with truncate */
+	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
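+	/* i_size rounded up to page units; the partial page at EOF stays valid */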
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (vmf->pgoff >= size)
+		ret = VM_FAULT_SIGBUS;
+	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+	sb_end_pagefault(inode->i_sb);
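+	/* VM_FAULT_NOPAGE: success; the caller makes the pfn pte writable */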
+	return ret;
+}
+
 static const struct vm_operations_struct xfs_file_vm_ops = {
 	.fault		= xfs_filemap_fault,
 	.pmd_fault	= xfs_filemap_pmd_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite	= xfs_filemap_page_mkwrite,
+	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
 };
 
STATIC int