@@ -262,23 +262,8 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
 	return result;
 }
 
-static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	int err;
-	struct inode *inode = file_inode(vma->vm_file);
-
-	sb_start_pagefault(inode->i_sb);
-	file_update_time(vma->vm_file);
-	down_read(&EXT4_I(inode)->i_mmap_sem);
-	err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
-	up_read(&EXT4_I(inode)->i_mmap_sem);
-	sb_end_pagefault(inode->i_sb);
-
-	return err;
-}
-
 /*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
+ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
  * handler we check for races agaist truncate. Note that since we cycle through
  * i_mmap_sem, we are sure that also any hole punching that began before we
  * were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
 static const struct vm_operations_struct ext4_dax_vm_ops = {
 	.fault		= ext4_dax_fault,
 	.pmd_fault	= ext4_dax_pmd_fault,
-	.page_mkwrite	= ext4_dax_mkwrite,
+	.page_mkwrite	= ext4_dax_fault,
 	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
 };
 #else
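
The .page_mkwrite = ext4_dax_fault hookup works because a write fault reaches the handler with FAULT_FLAG_WRITE set in vmf->flags, and because __dax_mkwrite() was at this point merely an alias for __dax_fault(), so the dedicated mkwrite wrapper only duplicated what the fault handler already did for writes. The sketch below shows that pattern; it is not the verbatim ext4 handler (sketch_dax_fault() is a made-up name, and the real ext4_dax_fault() of this kernel generation additionally starts a journal handle for write faults), and it assumes __dax_fault() and ext4_dax_mmap_get_block() with the signatures implied by the removed code above.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dax.h>
#include "ext4.h"	/* for EXT4_I() and ext4_dax_mmap_get_block() */

/*
 * Sketch only: one handler serving both .fault and .page_mkwrite.
 * The write-side bookkeeping that ext4_dax_mkwrite() performed
 * unconditionally becomes conditional on FAULT_FLAG_WRITE.
 */
static int sketch_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	int result;

	if (write) {
		sb_start_pagefault(inode->i_sb);	/* freeze protection */
		file_update_time(vma->vm_file);		/* mtime/ctime update */
	}
	/*
	 * Serialize against truncate and hole punching, as the comment
	 * in the patch above describes.
	 */
	down_read(&EXT4_I(inode)->i_mmap_sem);
	result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	if (write)
		sb_end_pagefault(inode->i_sb);

	return result;
}

Since read faults skip sb_start_pagefault() and file_update_time(), reads pay nothing for the consolidation, while write faults keep exactly the freeze protection and timestamp handling that ext4_dax_mkwrite() provided.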