@@ -108,7 +108,8 @@ struct dio_submit {
 /* dio_state communicated between submission path and end_io */
 struct dio {
 	int flags;			/* doesn't change */
-	int rw;
+	int op;
+	int op_flags;
 	blk_qc_t bio_cookie;
 	struct block_device *bio_bdev;
 	struct inode *inode;
@@ -163,7 +164,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
 				&sdio->from);
 
-	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
+	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
 		struct page *page = ZERO_PAGE(0);
 		/*
 		 * A memory fault, but the filesystem has some outstanding
@@ -242,7 +243,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 		transferred = dio->result;
 
 		/* Check for short read case */
-		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
+		if ((dio->op == REQ_OP_READ) &&
+		    ((offset + transferred) > dio->i_size))
 			transferred = dio->i_size - offset;
 	}
 
@@ -273,7 +275,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 		 */
 		dio->iocb->ki_pos += transferred;
 
-		if (dio->rw & WRITE)
+		if (dio->op == REQ_OP_WRITE)
 			ret = generic_write_sync(dio->iocb, transferred);
 		dio->iocb->ki_complete(dio->iocb, ret, 0);
 	}
@@ -375,7 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 
 	bio->bi_bdev = bdev;
 	bio->bi_iter.bi_sector = first_sector;
-	bio->bi_rw = dio->rw;
+	bio_set_op_attrs(bio, dio->op, dio->op_flags);
 	if (dio->is_async)
 		bio->bi_end_io = dio_bio_end_aio;
 	else
@@ -403,14 +405,13 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 	dio->refcount++;
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-	if (dio->is_async && dio->rw == READ && dio->should_dirty)
+	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
 	dio->bio_bdev = bio->bi_bdev;
 
 	if (sdio->submit_io) {
-		sdio->submit_io(dio->rw, bio, dio->inode,
-				sdio->logical_offset_in_bio);
+		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
 		dio->bio_cookie = BLK_QC_T_NONE;
 	} else
 		dio->bio_cookie = submit_bio(bio);
@@ -479,14 +480,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 	if (bio->bi_error)
 		dio->io_error = -EIO;
 
-	if (dio->is_async && dio->rw == READ && dio->should_dirty) {
+	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
 		err = bio->bi_error;
 		bio_check_pages_dirty(bio);	/* transfers ownership */
 	} else {
 		bio_for_each_segment_all(bvec, bio, i) {
 			struct page *page = bvec->bv_page;
 
-			if (dio->rw == READ && !PageCompound(page) &&
+			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
 					dio->should_dirty)
 				set_page_dirty_lock(page);
 			put_page(page);
@@ -639,7 +640,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 	 * which may decide to handle it or also return an unmapped
	 * buffer head.
 	 */
-	create = dio->rw & WRITE;
+	create = dio->op == REQ_OP_WRITE;
 	if (dio->flags & DIO_SKIP_HOLES) {
 		if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
 							i_blkbits))
@@ -789,7 +790,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
 {
 	int ret = 0;
 
-	if (dio->rw & WRITE) {
+	if (dio->op == REQ_OP_WRITE) {
 		/*
 		 * Read accounting is performed in submit_bio()
 		 */
@@ -989,7 +990,7 @@ do_holes:
 			loff_t i_size_aligned;
 
 			/* AKPM: eargh, -ENOTBLK is a hack */
-			if (dio->rw & WRITE) {
+			if (dio->op == REQ_OP_WRITE) {
 				put_page(page);
 				return -ENOTBLK;
 			}
@@ -1203,7 +1204,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		dio->is_async = true;
 
 	dio->inode = inode;
-	dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
+	if (iov_iter_rw(iter) == WRITE) {
+		dio->op = REQ_OP_WRITE;
+		dio->op_flags = WRITE_ODIRECT;
+	} else {
+		dio->op = REQ_OP_READ;
+	}
 
 	/*
 	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
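
The conversion pattern, restated outside the diff context. This is an
illustrative sketch only, not additional patch content; it uses the same
identifiers that appear in the hunks above:

	/*
	 * Before: direction and modifier flags shared one bitmask field,
	 * so direction was tested with a bitwise AND and the bio simply
	 * copied the whole mask.
	 */
	dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
	if (dio->rw & WRITE)				/* direction test */
		create = 1;
	bio->bi_rw = dio->rw;				/* bio inherits mask */

	/*
	 * After: the REQ_OP_* operation and its modifier flags are kept
	 * in separate fields, direction becomes an equality test, and
	 * the bio is populated through the op accessor.
	 */
	dio->op = REQ_OP_WRITE;				/* or REQ_OP_READ */
	dio->op_flags = WRITE_ODIRECT;			/* modifiers only; reads set none */
	if (dio->op == REQ_OP_WRITE)			/* direction test */
		create = 1;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);	/* bio gets op + flags */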