@@ -1989,7 +1989,7 @@ again:
 	page_start = page_offset(page);
 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
 			 &cached_state);
 
 	/* already ordered? We're done */
@@ -2482,7 +2482,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 	lock_start = backref->file_pos;
 	lock_end = backref->file_pos + backref->num_bytes - 1;
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
-			 0, &cached);
+			 &cached);
 
 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
 	if (ordered) {
@@ -2874,7 +2874,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 
 	lock_extent_bits(io_tree, ordered_extent->file_offset,
 			 ordered_extent->file_offset + ordered_extent->len - 1,
-			 0, &cached_state);
+			 &cached_state);
 
 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
 			ordered_extent->file_offset + ordered_extent->len - 1,
@@ -4690,7 +4690,7 @@ again:
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
 	set_page_extent_mapped(page);
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -4821,7 +4821,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+		lock_extent_bits(io_tree, hole_start, block_end - 1,
 				 &cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, hole_start,
 						     block_end - hole_start);
@@ -5133,7 +5133,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
 		end = state->end;
 		spin_unlock(&io_tree->lock);
 
-		lock_extent_bits(io_tree, start, end, 0, &cached_state);
+		lock_extent_bits(io_tree, start, end, &cached_state);
 
 		/*
 		 * If still has DELALLOC flag, the extent didn't reach disk,
@@ -7402,7 +7402,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
 	while (1) {
 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, cached_state);
+				 cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
 		 * doing DIO to, so we need to make sure theres no ordered
@@ -8636,7 +8636,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	}
 
 	if (!inode_evicting)
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+		lock_extent_bits(tree, page_start, page_end, &cached_state);
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
 		/*
@@ -8674,7 +8674,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 		btrfs_put_ordered_extent(ordered);
 		if (!inode_evicting) {
 			cached_state = NULL;
-			lock_extent_bits(tree, page_start, page_end, 0,
+			lock_extent_bits(tree, page_start, page_end,
 					 &cached_state);
 		}
 	}
@@ -8772,7 +8772,7 @@ again:
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
 	set_page_extent_mapped(page);
 
 	/*
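
Every hunk above makes the same mechanical change: the literal 0 passed to lock_extent_bits() is dropped, leaving only the range and the cached-state pointer, which suggests that extra-bits argument was always zero at these call sites and has been removed from the function's signature. The following is a minimal standalone sketch of that "sink a constant parameter" refactoring pattern, using hypothetical stand-in types and function names rather than the real btrfs API:

#include <stdio.h>

/* Hypothetical stand-ins for the extent io tree types; illustration only. */
struct io_tree { int locked; };
struct cached_state { int refs; };

/* Old shape: an extra "bits" argument that the callers above always passed as 0. */
static void lock_range_old(struct io_tree *tree, unsigned long start,
			   unsigned long end, unsigned int bits,
			   struct cached_state **cached)
{
	tree->locked = 1;
	printf("lock [%lu, %lu], extra bits %u\n", start, end, bits);
}

/* New shape: the always-zero argument is sunk into the callee. */
static void lock_range_new(struct io_tree *tree, unsigned long start,
			   unsigned long end, struct cached_state **cached)
{
	lock_range_old(tree, start, end, 0, cached);
}

int main(void)
{
	struct io_tree tree = { 0 };
	struct cached_state *cached = NULL;

	lock_range_old(&tree, 0, 4095, 0, &cached);	/* call site before the change */
	lock_range_new(&tree, 0, 4095, &cached);	/* call site after the change */
	return 0;
}

Sinking a constant argument like this shortens every call site and removes the opportunity for a new caller to pass stray bits by accident.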