@@ -21,6 +21,8 @@
 #include <linux/nfs_page.h>
 #include <linux/backing-dev.h>
 #include <linux/export.h>
+#include <linux/freezer.h>
+#include <linux/wait.h>
 
 #include <asm/uaccess.h>
 
@@ -1535,27 +1537,29 @@ static void nfs_writeback_result(struct rpc_task *task,
 	}
 }
 
+static int nfs_wait_atomic_killable(atomic_t *key)
+{
+	if (fatal_signal_pending(current))
+		return -ERESTARTSYS;
+	freezable_schedule_unsafe();
+	return 0;
+}
+
-static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
+static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
 {
-	int ret;
+	return wait_on_atomic_t(&cinfo->rpcs_out,
+			nfs_wait_atomic_killable, TASK_KILLABLE);
+}
 
-	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
-		return 1;
-	if (!may_wait)
-		return 0;
-	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
-				NFS_INO_COMMIT,
-				nfs_wait_bit_killable,
-				TASK_KILLABLE);
-	return (ret < 0) ? ret : 1;
+static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+{
+	atomic_inc(&cinfo->rpcs_out);
 }
 
-static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
 {
-	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
-	smp_mb__after_atomic();
-	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
+	if (atomic_dec_and_test(&cinfo->rpcs_out))
+		wake_up_atomic_t(&cinfo->rpcs_out);
 }
 
 void nfs_commitdata_release(struct nfs_commit_data *data)
@@ -1693,7 +1697,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
 				   data->mds_ops, how, 0);
  out_bad:
 	nfs_retry_commit(head, NULL, cinfo, 0);
-	cinfo->completion_ops->error_cleanup(NFS_I(inode));
 	return -ENOMEM;
 }
 
@@ -1755,8 +1758,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 
 	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
-	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
-		nfs_commit_clear_lock(NFS_I(data->inode));
+	nfs_commit_end(cinfo.mds);
 }
 
 static void nfs_commit_release(void *calldata)
@@ -1775,7 +1777,6 @@ static const struct rpc_call_ops nfs_commit_ops = {
 
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
 	.completion = nfs_commit_release_pages,
-	.error_cleanup = nfs_commit_clear_lock,
 };
 
 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
@@ -1794,30 +1795,25 @@ int nfs_commit_inode(struct inode *inode, int how)
 	LIST_HEAD(head);
 	struct nfs_commit_info cinfo;
 	int may_wait = how & FLUSH_SYNC;
+	int error = 0;
 	int res;
 
-	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
-	if (res <= 0)
-		goto out_mark_dirty;
 	nfs_init_cinfo_from_inode(&cinfo, inode);
+	nfs_commit_begin(cinfo.mds);
 	res = nfs_scan_commit(inode, &head, &cinfo);
-	if (res) {
-		int error;
-
+	if (res)
 		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
-		if (error < 0)
-			return error;
-		if (!may_wait)
-			goto out_mark_dirty;
-		error = wait_on_bit_action(&NFS_I(inode)->flags,
-				NFS_INO_COMMIT,
-				nfs_wait_bit_killable,
-				TASK_KILLABLE);
-		if (error < 0)
-			return error;
-	} else
-		nfs_commit_clear_lock(NFS_I(inode));
+	nfs_commit_end(cinfo.mds);
+	if (error < 0)
+		goto out_error;
+	if (!may_wait)
+		goto out_mark_dirty;
+	error = wait_on_commit(cinfo.mds);
+	if (error < 0)
+		return error;
 	return res;
+out_error:
+	res = error;
 	/* Note: If we exit without ensuring that the commit is complete,
 	 * we must mark the inode as dirty. Otherwise, future calls to
 	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
|