@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
 	}
 }
 
+static void fuse_io_release(struct kref *kref)
+{
+	kfree(container_of(kref, struct fuse_io_priv, refcnt));
+}
+
 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
 {
 	if (io->err)
@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 		}
 
 		io->iocb->ki_complete(io->iocb, res, 0);
-		kfree(io);
 	}
+
+	kref_put(&io->refcnt, fuse_io_release);
 }
 
 static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
 		size_t num_bytes, struct fuse_io_priv *io)
 {
 	spin_lock(&io->lock);
+	kref_get(&io->refcnt);
 	io->size += num_bytes;
 	io->reqs++;
 	spin_unlock(&io->lock);
@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
 
 static int fuse_do_readpage(struct file *file, struct page *page)
 {
-	struct fuse_io_priv io = { .async = 0, .file = file };
+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
 	struct inode *inode = page->mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_req *req;
@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
 	size_t res;
 	unsigned offset;
 	unsigned i;
-	struct fuse_io_priv io = { .async = 0, .file = file };
+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
 
 	for (i = 0; i < req->num_pages; i++)
 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
@@ -1240,6 +1247,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 			       size_t *nbytesp, int write)
 {
 	size_t nbytes = 0; /* # bytes already packed in req */
+	ssize_t ret = 0;
 
 	/* Special case for kernel I/O: can copy directly into the buffer */
 	if (ii->type & ITER_KVEC) {
@@ -1259,13 +1267,12 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
 		unsigned npages;
 		size_t start;
-		ssize_t ret = iov_iter_get_pages(ii,
-					&req->pages[req->num_pages],
+		ret = iov_iter_get_pages(ii, &req->pages[req->num_pages],
 					*nbytesp - nbytes,
 					req->max_pages - req->num_pages,
 					&start);
 		if (ret < 0)
-			return ret;
+			break;
 
 		iov_iter_advance(ii, ret);
 		nbytes += ret;
@@ -1288,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 
 	*nbytesp = nbytes;
 
-	return 0;
+	return ret;
 }
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
@@ -1312,6 +1319,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 	pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
 	ssize_t res = 0;
 	struct fuse_req *req;
+	int err = 0;
 
 	if (io->async)
 		req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
@@ -1332,11 +1340,9 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		size_t nres;
 		fl_owner_t owner = current->files;
 		size_t nbytes = min(count, nmax);
-		int err = fuse_get_user_pages(req, iter, &nbytes, write);
-		if (err) {
-			res = err;
+		err = fuse_get_user_pages(req, iter, &nbytes, write);
+		if (err && !nbytes)
 			break;
-		}
 
 		if (write)
 			nres = fuse_send_write(req, io, pos, nbytes, owner);
@@ -1346,11 +1352,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 		if (!io->async)
 			fuse_release_user_pages(req, !write);
 		if (req->out.h.error) {
-			if (!res)
-				res = req->out.h.error;
+			err = req->out.h.error;
 			break;
 		} else if (nres > nbytes) {
-			res = -EIO;
+			res = 0;
+			err = -EIO;
 			break;
 		}
 		count -= nres;
@@ -1374,7 +1380,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 	if (res > 0)
 		*ppos = pos;
 
-	return res;
+	return res > 0 ? res : err;
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
@@ -1398,7 +1404,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
 
 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
 	return __fuse_direct_read(&io, to, &iocb->ki_pos);
 }
 
@@ -1406,7 +1412,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
-	struct fuse_io_priv io = { .async = 0, .file = file };
+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
 	ssize_t res;
 
 	if (is_bad_inode(inode))
@@ -2843,6 +2849,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 	loff_t i_size;
 	size_t count = iov_iter_count(iter);
 	struct fuse_io_priv *io;
+	bool is_sync = is_sync_kiocb(iocb);
 
 	pos = offset;
 	inode = file->f_mapping->host;
@@ -2863,6 +2870,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 	if (!io)
 		return -ENOMEM;
 	spin_lock_init(&io->lock);
+	kref_init(&io->refcnt);
 	io->reqs = 1;
 	io->bytes = -1;
 	io->size = 0;
@@ -2882,12 +2890,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 	 * to wait on real async I/O requests, so we must submit this request
 	 * synchronously.
 	 */
-	if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+	if (!is_sync && (offset + count > i_size) &&
 	    iov_iter_rw(iter) == WRITE)
 		io->async = false;
 
-	if (io->async && is_sync_kiocb(iocb))
+	if (io->async && is_sync) {
+		/*
+		 * Additional reference to keep io around after
+		 * calling fuse_aio_complete()
+		 */
+		kref_get(&io->refcnt);
 		io->done = &wait;
+	}
 
 	if (iov_iter_rw(iter) == WRITE) {
 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
@@ -2900,14 +2914,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
 		/* we have a non-extending, async request, so return */
-		if (!is_sync_kiocb(iocb))
+		if (!is_sync)
 			return -EIOCBQUEUED;
 
 		wait_for_completion(&wait);
 		ret = fuse_get_res_by_io(io);
 	}
 
-	kfree(io);
+	kref_put(&io->refcnt, fuse_io_release);
 
 	if (iov_iter_rw(iter) == WRITE) {
 		if (ret > 0)
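
Note on the pattern: the patch replaces the single unconditional kfree() of
struct fuse_io_priv with kref reference counting, so that the submitter in
fuse_direct_IO() and each request issued through fuse_async_req_send() hold
their own reference, and whichever side finishes last frees the object via
fuse_io_release(). The FUSE_IO_PRIV_SYNC() initializer used for the on-stack
synchronous cases is defined outside this diff (presumably in fuse_i.h,
initializing .refcnt alongside .async = 0 and .file). Below is a minimal,
self-contained userspace sketch of that scheme, not kernel code: the
mini_kref_* helpers and the io_priv type are illustrative stand-ins for the
kernel's kref_init()/kref_get()/kref_put() and struct fuse_io_priv.

/*
 * Userspace sketch of the refcounting scheme this patch applies to
 * struct fuse_io_priv. All names here are illustrative; none exist
 * in the kernel tree.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mini_kref {
	atomic_int refcount;
};

static void mini_kref_init(struct mini_kref *kref)
{
	atomic_store(&kref->refcount, 1);	/* creator holds one reference */
}

static void mini_kref_get(struct mini_kref *kref)
{
	atomic_fetch_add(&kref->refcount, 1);
}

static void mini_kref_put(struct mini_kref *kref,
			  void (*release)(struct mini_kref *))
{
	/* the final put runs the release callback, as kref_put() does */
	if (atomic_fetch_sub(&kref->refcount, 1) == 1)
		release(kref);
}

struct io_priv {
	struct mini_kref refcnt;
	int reqs;	/* stand-in for the request bookkeeping fields */
};

/* mirrors fuse_io_release(): recover and free the containing object */
static void io_release(struct mini_kref *kref)
{
	free(container_of(kref, struct io_priv, refcnt));
	puts("io freed on final put");
}

int main(void)
{
	struct io_priv *io = malloc(sizeof(*io));

	mini_kref_init(&io->refcnt);	/* as kref_init() in fuse_direct_IO() */
	mini_kref_get(&io->refcnt);	/* as kref_get() in fuse_async_req_send() */

	/* completion path (fuse_aio_complete()) drops its reference... */
	mini_kref_put(&io->refcnt, io_release);
	/* ...and the submitter drops the last one, which frees io */
	mini_kref_put(&io->refcnt, io_release);
	return 0;
}

The extra kref_get() taken before setting io->done for sync kiocbs follows
the same logic as the second put in main() above: fuse_aio_complete() drops
a reference when the last request completes, so without the submitter's own
reference the io object could be freed while fuse_direct_IO() still needs it
for wait_for_completion() and fuse_get_res_by_io().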