@@ -115,8 +115,7 @@ struct kioctx {
 	struct page		**ring_pages;
 	long			nr_pages;
 
-	struct rcu_head		free_rcu;
-	struct work_struct	free_work;	/* see free_ioctx() */
+	struct rcu_work		free_rwork;	/* see free_ioctx() */
 
 	/*
 	 * signals when all in-flight requests are done
@@ -592,13 +591,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
 /*
  * free_ioctx() should be RCU delayed to synchronize against the RCU
  * protected lookup_ioctx() and also needs process context to call
- * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
- * ->free_work.
+ * aio_free_ring().  Use rcu_work.
  */
 static void free_ioctx(struct work_struct *work)
 {
-	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
-
+	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
+					  free_rwork);
 	pr_debug("freeing %p\n", ctx);
 
 	aio_free_ring(ctx);
@@ -608,14 +606,6 @@ static void free_ioctx(struct work_struct *work)
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
-static void free_ioctx_rcufn(struct rcu_head *head)
-{
-	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
-
-	INIT_WORK(&ctx->free_work, free_ioctx);
-	schedule_work(&ctx->free_work);
-}
-
 static void free_ioctx_reqs(struct percpu_ref *ref)
 {
 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -625,7 +615,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 		complete(&ctx->rq_wait->comp);
 
 	/* Synchronize against RCU protected table->table[] dereferences */
-	call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
+	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
+	queue_rcu_work(system_wq, &ctx->free_rwork);
 }
 
 /*