@@ -101,53 +101,18 @@ nfs_page_free(struct nfs_page *p)
 	kmem_cache_free(nfs_page_cachep, p);
 }
 
-static void
-nfs_iocounter_inc(struct nfs_io_counter *c)
-{
-	atomic_inc(&c->io_count);
-}
-
-static void
-nfs_iocounter_dec(struct nfs_io_counter *c)
-{
-	if (atomic_dec_and_test(&c->io_count)) {
-		clear_bit(NFS_IO_INPROGRESS, &c->flags);
-		smp_mb__after_atomic();
-		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
-	}
-}
-
-static int
-__nfs_iocounter_wait(struct nfs_io_counter *c)
-{
-	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
-	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
-		set_bit(NFS_IO_INPROGRESS, &c->flags);
-		if (atomic_read(&c->io_count) == 0)
-			break;
-		ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
-	} while (atomic_read(&c->io_count) != 0 && !ret);
-	finish_wait(wq, &q.wait);
-	return ret;
-}
-
 /**
  * nfs_iocounter_wait - wait for i/o to complete
- * @c: nfs_io_counter to use
+ * @l_ctx: nfs_lock_context with io_counter to use
  *
  * returns -ERESTARTSYS if interrupted by a fatal signal.
  * Otherwise returns 0 once the io_count hits 0.
  */
 int
-nfs_iocounter_wait(struct nfs_io_counter *c)
+nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
 {
-	if (atomic_read(&c->io_count) == 0)
-		return 0;
-	return __nfs_iocounter_wait(c);
+	return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable,
+			TASK_KILLABLE);
 }
 
 /*
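
The action callback named in nfs_iocounter_wait() above, nfs_wait_atomic_killable(), is defined outside this hunk. Under the wait_on_atomic_t() contract the callback runs only while the counter is still non-zero, after the task has been queued on the hashed waitqueue in the requested state (TASK_KILLABLE here); it sleeps and returns 0 to keep waiting, or a negative errno to abort. A minimal sketch of such a callback, assuming the pre-4.13 int (*action)(atomic_t *) signature:

	/* Sketch only: sleep in the killable state already set up by
	 * wait_on_atomic_t(), and abort the wait on a fatal signal.
	 */
	static int nfs_wait_atomic_killable(atomic_t *p)
	{
		schedule();
		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
		return 0;
	}

This matches the kerneldoc left in place above: 0 once io_count hits zero, or the callback's -ERESTARTSYS after a fatal signal. Note the counter also moves from the old struct nfs_io_counter to a bare atomic_t in struct nfs_lock_context, so the flags word and the NFS_IO_INPROGRESS bit disappear along with the deleted helpers.
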
@@ -370,7 +335,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 		return ERR_CAST(l_ctx);
 	}
 	req->wb_lock_context = l_ctx;
-	nfs_iocounter_inc(&l_ctx->io_count);
+	atomic_inc(&l_ctx->io_count);
 
 	/* Initialize the request struct. Initially, we assume a
 	 * long write-back delay. This will be adjusted in
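
The increment side needs no wakeup and no extra barrier: sleepers only care about the transition to zero, which the next hunk handles on the final decrement. A hedged sketch of the waiter this pairs with (drain_io() is a hypothetical name for illustration; the real callers live elsewhere in the NFS code):

	/* Hypothetical caller: block until every request that took a
	 * count with the atomic_inc() above has completed.
	 */
	static int drain_io(struct nfs_lock_context *l_ctx)
	{
		int ret = nfs_iocounter_wait(l_ctx);

		if (ret)
			return ret;	/* -ERESTARTSYS on a fatal signal */
		/* io_count was observed at zero; I/O has drained */
		return 0;
	}
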
@@ -431,7 +396,8 @@ static void nfs_clear_request(struct nfs_page *req)
 		req->wb_page = NULL;
 	}
 	if (l_ctx != NULL) {
-		nfs_iocounter_dec(&l_ctx->io_count);
+		if (atomic_dec_and_test(&l_ctx->io_count))
+			wake_up_atomic_t(&l_ctx->io_count);
 		nfs_put_lock_context(l_ctx);
 		req->wb_lock_context = NULL;
 	}
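
Here the open-coded clear_bit()/smp_mb__after_atomic()/wake_up_bit() sequence from the deleted nfs_iocounter_dec() collapses into two lines: atomic_dec_and_test() already implies a full memory barrier, and wake_up_atomic_t() finds the waitqueue by hashing the atomic_t's address, so waker and sleeper must use the same address. The general shape of the pattern, as a sketch with a hypothetical helper name:

	/* Sketch: the last dropper wakes anyone in wait_on_atomic_t() on
	 * the same address. atomic_dec_and_test() is a full barrier, so
	 * the woken sleeper sees all stores made before the final drop.
	 */
	static void nfs_io_put(atomic_t *io_count)
	{
		if (atomic_dec_and_test(io_count))
			wake_up_atomic_t(io_count);
	}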