@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 				    loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
 	ssize_t written;
 	ssize_t written_buffered;
 	loff_t endbyte;
@@ -1697,13 +1698,10 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
 	 * able to read what was just written.
 	 */
 	endbyte = pos + written_buffered - 1;
-	err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
-	if (!err && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(file_inode(file))->runtime_flags))
-		err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
+	err = btrfs_fdatawrite_range(inode, pos, endbyte);
 	if (err)
 		goto out;
-	err = filemap_fdatawait_range(file->f_mapping, pos, endbyte);
+	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
 	if (err)
 		goto out;
 	written += written_buffered;
@@ -1864,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
 	int ret;
 
 	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
-	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(inode)->runtime_flags))
-		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	ret = btrfs_fdatawrite_range(inode, start, end);
 	atomic_dec(&BTRFS_I(inode)->sync_writers);
 
 	return ret;
@@ -2820,3 +2815,29 @@ int btrfs_auto_defrag_init(void)
 
 	return 0;
 }
+
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+	int ret;
+
+	/*
+	 * So with compression we will find and lock a dirty page and clear the
+	 * first one as dirty, setup an async extent, and immediately return
+	 * with the entire range locked but with nobody actually marked with
+	 * writeback. So we can't just filemap_write_and_wait_range() and
+	 * expect it to work since it will just kick off a thread to do the
+	 * actual work. So we need to call filemap_fdatawrite_range _again_
+	 * since it will wait on the page lock, which won't be unlocked until
+	 * after the pages have been marked as writeback and so we're good to go
+	 * from there. We have to do this otherwise we'll miss the ordered
+	 * extents and that results in badness. Please Josef, do not think you
+	 * know better and pull this out at some point in the future, it is
+	 * right and you are wrong.
+	 */
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+	return ret;
+}
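
For context, a minimal sketch of the call pattern this patch establishes, mirroring the __btrfs_direct_write() hunk above: flush through the new helper, then wait separately. The wrapper name btrfs_write_and_wait_range() is hypothetical and not part of this patch; it only illustrates why the flush cannot simply be filemap_write_and_wait_range(), which would start waiting before compression has marked any page as writeback.

/*
 * Hypothetical sketch, not part of this patch. btrfs_fdatawrite_range()
 * re-issues filemap_fdatawrite_range() when the inode has async
 * (compressed) extents, so by the time it returns the dirty pages in the
 * range are marked writeback and the wait below actually waits on them.
 */
#include <linux/fs.h>

static int btrfs_write_and_wait_range(struct inode *inode, loff_t start,
				      loff_t end)
{
	int ret;

	/* Kick off writeback; covers the compression double-flush case. */
	ret = btrfs_fdatawrite_range(inode, start, end);
	if (ret)
		return ret;

	/* Pages in the range are now marked writeback, so this is safe. */
	return filemap_fdatawait_range(inode->i_mapping, start, end);
}

Keeping flush and wait as two steps is the point of the helper: with compression, the first filemap_fdatawrite_range() can return with the range locked but nothing marked writeback, and the second flush inside the helper blocks on the page locks until writeback is actually set, so the ordered extents are not missed.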