@@ -5,6 +5,7 @@
  *	Implements an efficient asynchronous io interface.
  *
  *	Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
+ *	Copyright 2018 Christoph Hellwig.
  *
  *	See ../COPYING for licensing terms.
  */
@@ -164,10 +165,22 @@ struct fsync_iocb {
 	bool			datasync;
 };
 
+struct poll_iocb {
+	struct file		*file;
+	__poll_t		events;
+	struct wait_queue_head	*head;
+
+	union {
+		struct wait_queue_entry	wait;
+		struct work_struct	work;
+	};
+};
+
 struct aio_kiocb {
 	union {
 		struct kiocb		rw;
 		struct fsync_iocb	fsync;
+		struct poll_iocb	poll;
 	};
 
 	struct kioctx		*ki_ctx;
@@ -1558,7 +1571,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
 	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
 			iocb->aio_rw_flags))
 		return -EINVAL;
-
 	req->file = fget(iocb->aio_fildes);
 	if (unlikely(!req->file))
 		return -EBADF;
@@ -1573,6 +1585,124 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
 	return -EIOCBQUEUED;
 }
 
+/* need to use list_del_init so we can check if item was present */
+static inline bool __aio_poll_remove(struct poll_iocb *req)
+{
+	if (list_empty(&req->wait.entry))
+		return false;
+	list_del_init(&req->wait.entry);
+	return true;
+}
+
+static inline void __aio_poll_complete(struct poll_iocb *req, __poll_t mask)
+{
+	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+	struct file *file = req->file;
+
+	aio_complete(iocb, mangle_poll(mask), 0);
+	fput(file);
+}
+
+static void aio_poll_work(struct work_struct *work)
+{
+	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+
+	__aio_poll_complete(req, req->events);
+}
+
+static int aio_poll_cancel(struct kiocb *iocb)
+{
+	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+	struct poll_iocb *req = &aiocb->poll;
+	struct wait_queue_head *head = req->head;
+	bool found = false;
+
+	spin_lock(&head->lock);
+	found = __aio_poll_remove(req);
+	spin_unlock(&head->lock);
+
+	if (found) {
+		req->events = 0;
+		INIT_WORK(&req->work, aio_poll_work);
+		schedule_work(&req->work);
+	}
+	return 0;
+}
+
+static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+		void *key)
+{
+	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+	struct file *file = req->file;
+	__poll_t mask = key_to_poll(key);
+
+	assert_spin_locked(&req->head->lock);
+
+	/* for instances that support it check for an event match first: */
+	if (mask && !(mask & req->events))
+		return 0;
+
+	mask = file->f_op->poll_mask(file, req->events);
+	if (!mask)
+		return 0;
+
+	__aio_poll_remove(req);
+
+	req->events = mask;
+	INIT_WORK(&req->work, aio_poll_work);
+	schedule_work(&req->work);
+	return 1;
+}
+
+static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+{
+	struct kioctx *ctx = aiocb->ki_ctx;
+	struct poll_iocb *req = &aiocb->poll;
+	__poll_t mask;
+
+	/* reject any unknown events outside the normal event mask. */
+	if ((u16)iocb->aio_buf != iocb->aio_buf)
+		return -EINVAL;
+	/* reject fields that are not defined for poll */
+	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
+		return -EINVAL;
+
+	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+	req->file = fget(iocb->aio_fildes);
+	if (unlikely(!req->file))
+		return -EBADF;
+	if (!file_has_poll_mask(req->file))
+		goto out_fail;
+
+	req->head = req->file->f_op->get_poll_head(req->file, req->events);
+	if (!req->head)
+		goto out_fail;
+	if (IS_ERR(req->head)) {
+		mask = EPOLLERR;
+		goto done;
+	}
+
+	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
+	aiocb->ki_cancel = aio_poll_cancel;
+
+	spin_lock_irq(&ctx->ctx_lock);
+	spin_lock(&req->head->lock);
+	mask = req->file->f_op->poll_mask(req->file, req->events);
+	if (!mask) {
+		__add_wait_queue(req->head, &req->wait);
+		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+	}
+	spin_unlock(&req->head->lock);
+	spin_unlock_irq(&ctx->ctx_lock);
+done:
+	if (mask)
+		__aio_poll_complete(req, mask);
+	return -EIOCBQUEUED;
+out_fail:
+	fput(req->file);
+	return -EINVAL; /* same as no support for IOCB_CMD_POLL */
+}
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		struct iocb *iocb, bool compat)
 {
@@ -1641,6 +1771,9 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		break;
 	case IOCB_CMD_FDSYNC:
 		ret = aio_fsync(&req->fsync, iocb, true);
 		break;
+	case IOCB_CMD_POLL:
+		ret = aio_poll(req, iocb);
+		break;
 	default:
 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
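
For reference, below is a minimal userspace sketch (not part of the patch itself) of how the new IOCB_CMD_POLL opcode could be exercised on a kernel with this series applied. The requested event mask is passed in iocb->aio_buf, as with poll(2); the resulting mask comes back in the io_event res field. The target file descriptor must refer to a file whose file_operations provide get_poll_head/poll_mask, otherwise io_submit fails with -EINVAL via the out_fail path above. The syscall wrappers and the fd choice are illustrative assumptions, not mandated by the patch.

/*
 * Hypothetical usage sketch: submit a one-shot poll for POLLIN on stdin
 * through the in-kernel aio interface.
 */
#include <linux/aio_abi.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef IOCB_CMD_POLL
#define IOCB_CMD_POLL	5	/* only defined in uapi headers from a patched kernel */
#endif

/* thin wrappers; glibc does not expose the raw aio syscalls */
static long io_setup(unsigned int nr, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, nr, ctx);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbs)
{
	return syscall(__NR_io_submit, ctx, nr, iocbs);
}

static long io_getevents(aio_context_t ctx, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
}

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;

	if (io_setup(1, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = STDIN_FILENO;		/* must support poll_mask */
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_buf = POLLIN;			/* requested events */

	/* fails with -EINVAL if the file has no poll_mask support */
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;

	/* one-shot: the iocb completes once a requested event is pending */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return 1;

	printf("revents = 0x%llx\n", (unsigned long long)ev.res);
	return 0;
}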