@@ -2332,12 +2332,13 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
+	int i;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
@@ -2355,14 +2356,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
 		if (end_extent_writepage(page, err, start, end))
 			continue;
 
 		end_page_writeback(page);
-	} while (bvec >= bio->bi_io_vec);
+	}
 
 	bio_put(bio);
 }
@@ -2392,9 +2390,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
  */
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
+	struct bio_vec *bvec;
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct bio_vec *bvec = bio->bi_io_vec;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	struct extent_io_tree *tree;
 	u64 offset = 0;
@@ -2405,11 +2402,12 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	u64 extent_len = 0;
 	int mirror;
 	int ret;
+	int i;
 
 	if (err)
 		uptodate = 0;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
 
@@ -2433,9 +2431,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 		len = bvec->bv_len;
 
-		if (++bvec <= bvec_end)
-			prefetchw(&bvec->bv_page->flags);
-
 		mirror = io_bio->mirror_num;
 		if (likely(uptodate && tree->ops &&
 			   tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2511,7 @@ readpage_ok:
 			extent_start = start;
 			extent_len = end + 1 - start;
 		}
-	} while (bvec <= bvec_end);
+	}
 
 	if (extent_len)
 		endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,7 +2542,6 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 	}
 
 	if (bio) {
-		bio->bi_size = 0;
 		bio->bi_bdev = bdev;
 		bio->bi_sector = first_sector;
 		btrfs_bio = btrfs_io_bio(bio);
@@ -3410,20 +3404,18 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
 
 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 {
-	int uptodate = err == 0;
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec;
 	struct extent_buffer *eb;
-	int done;
+	int i, done;
 
-	do {
+	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		bvec--;
 		eb = (struct extent_buffer *)page->private;
 		BUG_ON(!eb);
 		done = atomic_dec_and_test(&eb->io_pages);
 
-		if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+		if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 			set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 			ClearPageUptodate(page);
 			SetPageError(page);
@@ -3435,10 +3427,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
 			continue;
 
 		end_extent_buffer_writeback(eb);
-	} while (bvec >= bio->bi_io_vec);
+	}
 
 	bio_put(bio);
-
 }
 
 static int write_one_eb(struct extent_buffer *eb,
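
Note: the recurring change in the hunks above replaces the open-coded reverse walk over bio->bi_io_vec (and the manual prefetchw() of the next page's flags) with the forward bio_for_each_segment_all() iterator. As a rough illustration of the before/after iteration styles, here is a minimal, self-contained userspace sketch; the struct layouts and the model_for_each_segment_all() macro are simplified stand-ins for illustration only, not the kernel's definitions.

/*
 * Minimal userspace model of the iteration change in this patch.  The
 * structures and the model_for_each_segment_all() macro are simplified
 * stand-ins, not the kernel definitions.
 */
#include <stdio.h>

struct bio_vec {
	int bv_page;			/* stand-in for struct page * */
};

struct bio {
	struct bio_vec bi_io_vec[4];	/* inline biovec array */
	int bi_vcnt;			/* number of valid entries */
};

/* Forward walk over every biovec, modelled on bio_for_each_segment_all(). */
#define model_for_each_segment_all(bvl, bio, i) \
	for ((i) = 0, (bvl) = (bio)->bi_io_vec; (i) < (bio)->bi_vcnt; (i)++, (bvl)++)

int main(void)
{
	struct bio bio = { .bi_io_vec = { { 1 }, { 2 }, { 3 } }, .bi_vcnt = 3 };
	struct bio_vec *bvec;
	int i;

	/*
	 * Old style: the endio handlers started from the last biovec and
	 * walked backwards (in the kernel this used a bvec pointer and a
	 * do/while loop rather than an index).
	 */
	for (i = bio.bi_vcnt - 1; i >= 0; i--)
		printf("old order: page %d\n", bio.bi_io_vec[i].bv_page);

	/* New style: one forward loop hidden behind the iterator macro. */
	model_for_each_segment_all(bvec, &bio, i) {
		printf("new order: page %d\n", bvec->bv_page);
	}

	return 0;
}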