@@ -1939,11 +1939,12 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 	SetPageUptodate(page);
 }
 
-int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
+int free_io_failure(struct extent_io_tree *failure_tree,
+		    struct extent_io_tree *io_tree,
+		    struct io_failure_record *rec)
 {
 	int ret;
 	int err = 0;
-	struct extent_io_tree *failure_tree = &inode->io_failure_tree;
 
 	set_state_failrec(failure_tree, rec->start, NULL);
 	ret = clear_extent_bits(failure_tree, rec->start,
@@ -1952,7 +1953,7 @@ int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
 	if (ret)
 		err = ret;
 
-	ret = clear_extent_bits(&inode->io_tree, rec->start,
+	ret = clear_extent_bits(io_tree, rec->start,
 				rec->start + rec->len - 1,
 				EXTENT_DAMAGED);
 	if (ret && !err)
@@ -2081,24 +2082,24 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
  * each time an IO finishes, we do a fast check in the IO failure tree
  * to see if we need to process or clean up an io_failure_record
  */
-int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
-		     unsigned int pg_offset)
+int clean_io_failure(struct btrfs_fs_info *fs_info,
+		     struct extent_io_tree *failure_tree,
+		     struct extent_io_tree *io_tree, u64 start,
+		     struct page *page, u64 ino, unsigned int pg_offset)
 {
 	u64 private;
 	struct io_failure_record *failrec;
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	struct extent_state *state;
 	int num_copies;
 	int ret;
 
 	private = 0;
-	ret = count_range_bits(&inode->io_failure_tree, &private,
-			       (u64)-1, 1, EXTENT_DIRTY, 0);
+	ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
+			       EXTENT_DIRTY, 0);
 	if (!ret)
 		return 0;
 
-	ret = get_state_failrec(&inode->io_failure_tree, start,
-				&failrec);
+	ret = get_state_failrec(failure_tree, start, &failrec);
 	if (ret)
 		return 0;
 
@@ -2114,25 +2115,25 @@ int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
 	if (fs_info->sb->s_flags & MS_RDONLY)
 		goto out;
 
-	spin_lock(&inode->io_tree.lock);
-	state = find_first_extent_bit_state(&inode->io_tree,
+	spin_lock(&io_tree->lock);
+	state = find_first_extent_bit_state(io_tree,
 					    failrec->start,
 					    EXTENT_LOCKED);
-	spin_unlock(&inode->io_tree.lock);
+	spin_unlock(&io_tree->lock);
 
 	if (state && state->start <= failrec->start &&
 	    state->end >= failrec->start + failrec->len - 1) {
 		num_copies = btrfs_num_copies(fs_info, failrec->logical,
 					      failrec->len);
 		if (num_copies > 1) {
-			repair_io_failure(fs_info, btrfs_ino(inode), start,
-					  failrec->len, failrec->logical, page,
-					  pg_offset, failrec->failed_mirror);
+			repair_io_failure(fs_info, ino, start, failrec->len,
+					  failrec->logical, page, pg_offset,
+					  failrec->failed_mirror);
 		}
 	}
 
 out:
-	free_io_failure(inode, failrec);
+	free_io_failure(failure_tree, io_tree, failrec);
 
 	return 0;
 }
@@ -2373,6 +2374,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct io_failure_record *failrec;
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
 	struct bio *bio;
 	int read_mode = 0;
 	int ret;
@@ -2385,7 +2387,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 
 	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
 	if (!ret) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
 
@@ -2398,7 +2400,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 				      (int)phy_offset, failed_bio->bi_end_io,
 				      NULL);
 	if (!bio) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		return -EIO;
 	}
 	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
@@ -2410,7 +2412,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	ret = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
 					 failrec->bio_flags, 0);
 	if (ret) {
-		free_io_failure(BTRFS_I(inode), failrec);
+		free_io_failure(failure_tree, tree, failrec);
 		bio_put(bio);
 	}
 
@@ -2514,7 +2516,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 	struct bio_vec *bvec;
 	int uptodate = !bio->bi_error;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	struct extent_io_tree *tree;
+	struct extent_io_tree *tree, *failure_tree;
 	u64 offset = 0;
 	u64 start;
 	u64 end;
@@ -2535,6 +2537,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 			 (u64)bio->bi_iter.bi_sector, bio->bi_error,
 			 io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
+		failure_tree = &BTRFS_I(inode)->io_failure_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
@@ -2564,8 +2567,10 @@ static void end_bio_extent_readpage(struct bio *bio)
 			if (ret)
 				uptodate = 0;
 			else
-				clean_io_failure(BTRFS_I(inode), start,
-						 page, 0);
+				clean_io_failure(BTRFS_I(inode)->root->fs_info,
+						 failure_tree, tree, start,
+						 page,
+						 btrfs_ino(BTRFS_I(inode)), 0);
 		}
 
 		if (likely(uptodate))