|
@@ -487,13 +487,6 @@ xlog_cil_push(
|
|
|
new_ctx->cil = cil;
|
|
|
cil->xc_ctx = new_ctx;
|
|
|
|
|
|
- /*
|
|
|
- * mirror the new sequence into the cil structure so that we can do
|
|
|
- * unlocked checks against the current sequence in log forces without
|
|
|
- * risking deferencing a freed context pointer.
|
|
|
- */
|
|
|
- cil->xc_current_sequence = new_ctx->sequence;
|
|
|
-
|
|
|
/*
|
|
|
* The switch is now done, so we can drop the context lock and move out
|
|
|
* of a shared context. We can't just go straight to the commit record,
|
|
@@ -512,8 +505,15 @@ xlog_cil_push(
|
|
|
* Hence we need to add this context to the committing context list so
|
|
|
* that higher sequences will wait for us to write out a commit record
|
|
|
* before they do.
|
|
|
+ *
|
|
|
+ * xfs_log_force_lsn requires us to mirror the new sequence into the cil
|
|
|
+ * structure atomically with the addition of this sequence to the
|
|
|
+ * committing list. This also ensures that we can do unlocked checks
|
|
|
+ * against the current sequence in log forces without risking
|
|
|
+ * dereferencing a freed context pointer.
|
|
|
*/
|
|
|
spin_lock(&cil->xc_push_lock);
|
|
|
+ cil->xc_current_sequence = new_ctx->sequence;
|
|
|
list_add(&ctx->committing, &cil->xc_committing);
|
|
|
spin_unlock(&cil->xc_push_lock);
|
|
|
up_write(&cil->xc_ctx_lock);
|
|
@@ -651,8 +651,14 @@ xlog_cil_push_background(
|
|
|
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
|
|
|
+ * number that is passed. When it returns, the work will be queued for
|
|
|
+ * @push_seq, but it won't be completed. The caller is expected to do any
|
|
|
+ * waiting for push_seq to complete if it is required.
|
|
|
+ */
|
|
|
static void
|
|
|
-xlog_cil_push_foreground(
|
|
|
+xlog_cil_push_now(
|
|
|
struct xlog *log,
|
|
|
xfs_lsn_t push_seq)
|
|
|
{
|
|
@@ -677,10 +683,8 @@ xlog_cil_push_foreground(
|
|
|
}
|
|
|
|
|
|
cil->xc_push_seq = push_seq;
|
|
|
+ queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
|
|
|
spin_unlock(&cil->xc_push_lock);
|
|
|
-
|
|
|
- /* do the push now */
|
|
|
- xlog_cil_push(log);
|
|
|
}
|
|
|
|
|
|
bool
|
|
@@ -785,7 +789,8 @@ xlog_cil_force_lsn(
|
|
|
* xlog_cil_push() handles racing pushes for the same sequence,
|
|
|
* so no need to deal with it here.
|
|
|
*/
|
|
|
- xlog_cil_push_foreground(log, sequence);
|
|
|
+restart:
|
|
|
+ xlog_cil_push_now(log, sequence);
|
|
|
|
|
|
/*
|
|
|
* See if we can find a previous sequence still committing.
|
|
@@ -793,7 +798,6 @@ xlog_cil_force_lsn(
|
|
|
* before allowing the force of push_seq to go ahead. Hence block
|
|
|
* on commits for those as well.
|
|
|
*/
|
|
|
-restart:
|
|
|
spin_lock(&cil->xc_push_lock);
|
|
|
list_for_each_entry(ctx, &cil->xc_committing, committing) {
|
|
|
if (ctx->sequence > sequence)
|
|
@@ -811,6 +815,28 @@ restart:
|
|
|
/* found it! */
|
|
|
commit_lsn = ctx->commit_lsn;
|
|
|
}
|
|
|
+
|
|
|
+ /*
|
|
|
+ * The call to xlog_cil_push_now() executes the push in the background.
|
|
|
+ * Hence by the time we have got here, our sequence may not have been
|
|
|
+ * pushed yet. This is true if the current sequence still matches the
|
|
|
+ * push sequence after the above wait loop and the CIL still contains
|
|
|
+ * dirty objects.
|
|
|
+ *
|
|
|
+ * When the push occurs, it will empty the CIL and
|
|
|
+ * atomically increment the current sequence past the push sequence and
|
|
|
+ * move it into the committing list. Of course, if the CIL is clean at
|
|
|
+ * the time of the push, it won't have pushed the CIL at all, so in that
|
|
|
+ * case we should try the push for this sequence again from the start
|
|
|
+ * just in case.
|
|
|
+ */
|
|
|
+
|
|
|
+ if (sequence == cil->xc_current_sequence &&
|
|
|
+ !list_empty(&cil->xc_cil)) {
|
|
|
+ spin_unlock(&cil->xc_push_lock);
|
|
|
+ goto restart;
|
|
|
+ }
|
|
|
+
|
|
|
spin_unlock(&cil->xc_push_lock);
|
|
|
return commit_lsn;
|
|
|
}
|