@@ -224,9 +224,9 @@ static inline struct page *dio_get_page(struct dio *dio,
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
-		bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 {
+	loff_t offset = dio->iocb->ki_pos;
 	ssize_t transferred = 0;
 
 	/*
@@ -285,7 +285,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 {
 	struct dio *dio = container_of(work, struct dio, complete_work);
 
-	dio_complete(dio, dio->iocb->ki_pos, 0, true);
+	dio_complete(dio, 0, true);
 }
 
 static int dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -314,7 +314,7 @@ static void dio_bio_end_aio(struct bio *bio)
 			queue_work(dio->inode->i_sb->s_dio_done_wq,
 				   &dio->complete_work);
 		} else {
-			dio_complete(dio, dio->iocb->ki_pos, 0, true);
+			dio_complete(dio, 0, true);
 		}
 	}
 }
@@ -1319,7 +1319,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
-		retval = dio_complete(dio, offset, retval, false);
+		retval = dio_complete(dio, retval, false);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 