@@ -7215,7 +7215,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
 	struct extent_map *em = NULL;
 	int ret;
 
-	down_read(&BTRFS_I(inode)->dio_sem);
 	if (type != BTRFS_ORDERED_NOCOW) {
 		em = create_pinned_em(inode, start, len, orig_start,
 				      block_start, block_len, orig_block_len,
@@ -7234,7 +7233,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
 		em = ERR_PTR(ret);
 	}
 out:
-	up_read(&BTRFS_I(inode)->dio_sem);
 
 	return em;
 }
@@ -8695,6 +8693,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		dio_data.unsubmitted_oe_range_start = (u64)offset;
 		dio_data.unsubmitted_oe_range_end = (u64)offset;
 		current->journal_info = &dio_data;
+		down_read(&BTRFS_I(inode)->dio_sem);
 	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
 			    &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_end(inode);
@@ -8707,6 +8706,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 				   iter, btrfs_get_blocks_direct, NULL,
 				   btrfs_submit_direct, flags);
 	if (iov_iter_rw(iter) == WRITE) {
+		up_read(&BTRFS_I(inode)->dio_sem);
 		current->journal_info = NULL;
 		if (ret < 0 && ret != -EIOCBQUEUED) {
 			if (dio_data.reserve)
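
Below is a minimal userspace sketch (an analogy, not kernel code) of the locking change in this diff, assuming the intent is to hold the shared dio_sem once around the whole direct-IO write in btrfs_direct_IO rather than once per extent in btrfs_create_dio_extent. A POSIX rwlock stands in for the kernel rw_semaphore, and the names dio_sem_analogy, create_dio_extent_stub and direct_io_write are hypothetical, used only to illustrate the widened lock scope.

/*
 * Userspace analogy of the patch above: the helper no longer takes the
 * lock itself; the top-level write path holds the shared lock across
 * all extent creation, mirroring down_read()/up_read() on dio_sem.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dio_sem_analogy = PTHREAD_RWLOCK_INITIALIZER;

/* Mirrors btrfs_create_dio_extent(): runs under the caller's shared lock. */
static void create_dio_extent_stub(int extent)
{
	printf("creating dio extent %d under the shared lock\n", extent);
}

/* Mirrors the btrfs_direct_IO() write path: one lock for the whole write. */
static void direct_io_write(int nr_extents)
{
	pthread_rwlock_rdlock(&dio_sem_analogy);	/* down_read() */
	for (int i = 0; i < nr_extents; i++)
		create_dio_extent_stub(i);
	pthread_rwlock_unlock(&dio_sem_analogy);	/* up_read() */
}

int main(void)
{
	direct_io_write(3);
	return 0;
}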