@@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
  * for the whole file.
  */
 static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+		      struct block_device *bdev, struct iov_iter *iter,
+		      loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+		      dio_submit_t submit_io, int flags)
 {
 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
 	unsigned blkbits = i_blkbits;
@@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct blk_plug plug;
 	unsigned long align = offset | iov_iter_alignment(iter);
 
-	if (rw & WRITE)
-		rw = WRITE_ODIRECT;
-
 	/*
 	 * Avoid references to bdev if not absolutely needed to give
 	 * the early prefetch in the caller enough time.
@@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	}
 
 	/* watch out for a 0 len io from a tricksy fs */
-	if (rw == READ && !iov_iter_count(iter))
+	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
 	dio->flags = flags;
 	if (dio->flags & DIO_LOCKING) {
-		if (rw == READ) {
+		if (iov_iter_rw(iter) == READ) {
 			struct address_space *mapping =
 					iocb->ki_filp->f_mapping;
 
@@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	if (is_sync_kiocb(iocb))
 		dio->is_async = false;
 	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-	    (rw & WRITE) && end > i_size_read(inode))
+		 iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
 		dio->is_async = false;
 	else
 		dio->is_async = true;
 
 	dio->inode = inode;
-	dio->rw = rw;
+	dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
 
 	/*
 	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
 	 * so that we can call ->fsync.
 	 */
-	if (dio->is_async && (rw & WRITE) &&
+	if (dio->is_async && iov_iter_rw(iter) == WRITE &&
 	    ((iocb->ki_filp->f_flags & O_DSYNC) ||
 	     IS_SYNC(iocb->ki_filp->f_mapping->host))) {
 		retval = dio_set_defer_completion(dio);
@@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 * we can let i_mutex go now that its achieved its purpose
 	 * of protecting us from looking up uninitialized blocks.
 	 */
-	if (rw == READ && (dio->flags & DIO_LOCKING))
+	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
 		mutex_unlock(&dio->inode->i_mutex);
 
 	/*
@@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 */
 	BUG_ON(retval == -EIOCBQUEUED);
 	if (dio->is_async && retval == 0 && dio->result &&
-	    (rw == READ || dio->result == count))
+	    (iov_iter_rw(iter) == READ || dio->result == count))
 		retval = -EIOCBQUEUED;
 	else
 		dio_await_completion(dio);
@@ -1300,11 +1297,11 @@ out:
 	return retval;
 }
 
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+			     struct block_device *bdev, struct iov_iter *iter,
+			     loff_t offset, get_block_t get_block,
+			     dio_iodone_t end_io, dio_submit_t submit_io,
+			     int flags)
 {
 	/*
 	 * The block device state is needed in the end to finally
@@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	prefetch(bdev->bd_queue);
 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-	return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-				     get_block, end_io, submit_io, flags);
+	return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+				     end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
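
The change above drops the separate rw argument and derives the transfer direction from the iov_iter itself via iov_iter_rw(); only dio->rw still needs the legacy value, so it is mapped back to WRITE_ODIRECT or READ explicitly. As a rough standalone illustration of that direction-in-the-iterator idea (plain userspace C, not kernel code; struct iov_iter, READ, WRITE and iov_iter_rw() below are simplified stand-ins for the kernel definitions):

/*
 * Standalone userspace sketch, not kernel code: the iterator carries the
 * data direction, so callees query iov_iter_rw() instead of taking 'rw'.
 */
#include <stdio.h>

#define READ	0
#define WRITE	1

struct iov_iter {
	unsigned int type;	/* low bit encodes the data direction */
	size_t count;		/* bytes remaining in the transfer */
};

/* simplified equivalent of the kernel's iov_iter_rw() helper */
static inline unsigned int iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

/* callee no longer needs an 'int rw' argument; it asks the iterator */
static void submit_dio(struct iov_iter *iter)
{
	if (iov_iter_rw(iter) == WRITE)
		printf("O_DIRECT write, %zu bytes\n", iter->count);
	else
		printf("O_DIRECT read, %zu bytes\n", iter->count);
}

int main(void)
{
	struct iov_iter wr = { .type = WRITE, .count = 4096 };
	struct iov_iter rd = { .type = READ,  .count = 512 };

	submit_dio(&wr);
	submit_dio(&rd);
	return 0;
}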