@@ -97,12 +97,10 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
 	return bio;
 }
 
-static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
-			struct bio *bio)
+static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio)
 {
-	if (!is_read_io(rw))
+	if (!is_read_io(bio_op(bio)))
 		atomic_inc(&sbi->nr_wb_bios);
-	bio->bi_rw = rw;
 	submit_bio(bio);
 }
 
@@ -113,12 +111,14 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
 	if (!io->bio)
 		return;
 
-	if (is_read_io(fio->rw))
+	if (is_read_io(fio->op))
 		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
 	else
 		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
 
-	__submit_bio(io->sbi, fio->rw, io->bio);
+	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+
+	__submit_bio(io->sbi, io->bio);
 	io->bio = NULL;
 }
 
@@ -184,10 +184,12 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 	/* change META to META_FLUSH in the checkpoint procedure */
 	if (type >= META_FLUSH) {
 		io->fio.type = META_FLUSH;
+		io->fio.op = REQ_OP_WRITE;
 		if (test_opt(sbi, NOBARRIER))
-			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
+			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
 		else
-			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
+			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
+								REQ_PRIO;
 	}
 	__submit_merged_bio(io);
 out:
@@ -229,14 +231,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 	f2fs_trace_ios(fio, 0);
 
 	/* Allocate a new bio */
-	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
+	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
 	}
+	bio->bi_rw = fio->op_flags;
+	bio_set_op_attrs(bio, fio->op, fio->op_flags);
 
-	__submit_bio(fio->sbi, fio->rw, bio);
+	__submit_bio(fio->sbi, bio);
 	return 0;
 }
 
@@ -245,7 +249,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
 	struct f2fs_sb_info *sbi = fio->sbi;
 	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
 	struct f2fs_bio_info *io;
-	bool is_read = is_read_io(fio->rw);
+	bool is_read = is_read_io(fio->op);
 	struct page *bio_page;
 
 	io = is_read ? &sbi->read_io : &sbi->write_io[btype];
@@ -257,7 +261,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
 	down_write(&io->io_rwsem);
 
 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
-						io->fio.rw != fio->rw))
+	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
 		__submit_merged_bio(io);
 alloc_new:
 	if (io->bio == NULL) {
@@ -391,7 +395,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 }
 
 struct page *get_read_data_page(struct inode *inode, pgoff_t index,
-						int rw, bool for_write)
+						int op_flags, bool for_write)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct dnode_of_data dn;
@@ -401,7 +405,8 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
 	struct f2fs_io_info fio = {
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
-		.rw = rw,
+		.op = REQ_OP_READ,
+		.op_flags = op_flags,
 		.encrypted_page = NULL,
 	};
 
@@ -1052,7 +1057,7 @@ got_it:
 		 */
 		if (bio && (last_block_in_bio != block_nr - 1)) {
 submit_and_realloc:
-			__submit_bio(F2FS_I_SB(inode), READ, bio);
+			__submit_bio(F2FS_I_SB(inode), bio);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -1081,7 +1086,7 @@ submit_and_realloc:
 			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
 			bio->bi_end_io = f2fs_read_end_io;
 			bio->bi_private = ctx;
-			bio->bi_rw = READ;
+			bio_set_op_attrs(bio, REQ_OP_READ, 0);
 		}
 
 		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1096,7 +1101,7 @@ set_error_page:
 		goto next_page;
 confused:
 		if (bio) {
-			__submit_bio(F2FS_I_SB(inode), READ, bio);
+			__submit_bio(F2FS_I_SB(inode), bio);
 			bio = NULL;
 		}
 		unlock_page(page);
@@ -1106,7 +1111,7 @@ next_page:
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		__submit_bio(F2FS_I_SB(inode), READ, bio);
+		__submit_bio(F2FS_I_SB(inode), bio);
 	return 0;
 }
 
@@ -1223,7 +1228,8 @@ static int f2fs_write_data_page(struct page *page,
 	struct f2fs_io_info fio = {
 		.sbi = sbi,
 		.type = DATA,
-		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+		.op = REQ_OP_WRITE,
+		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -1664,7 +1670,8 @@ repeat:
 		struct f2fs_io_info fio = {
 			.sbi = sbi,
 			.type = DATA,
-			.rw = READ_SYNC,
+			.op = REQ_OP_READ,
+			.op_flags = READ_SYNC,
 			.old_blkaddr = blkaddr,
 			.new_blkaddr = blkaddr,
 			.page = page,