@@ -1531,6 +1531,20 @@ xfs_buf_submit(
 	xfs_buf_rele(bp);
 }
 
+/*
+ * Wait for I/O completion of a sync buffer and return the I/O error code.
+ */
+static int
+xfs_buf_iowait(
+	struct xfs_buf		*bp)
+{
+	trace_xfs_buf_iowait(bp, _RET_IP_);
+	wait_for_completion(&bp->b_iowait);
+	trace_xfs_buf_iowait_done(bp, _RET_IP_);
+
+	return bp->b_error;
+}
+
 /*
  * Synchronous buffer IO submission path, read or write.
  */
@@ -1553,12 +1567,7 @@ xfs_buf_submit_wait(
 	error = __xfs_buf_submit(bp);
 	if (error)
 		goto out;
-
-	/* wait for completion before gathering the error from the buffer */
-	trace_xfs_buf_iowait(bp, _RET_IP_);
-	wait_for_completion(&bp->b_iowait);
-	trace_xfs_buf_iowait_done(bp, _RET_IP_);
-	error = bp->b_error;
+	error = xfs_buf_iowait(bp);
 
 out:
 	/*
@@ -1961,16 +1970,11 @@ xfs_buf_cmp(
 }
 
 /*
- * submit buffers for write.
- *
- * When we have a large buffer list, we do not want to hold all the buffers
- * locked while we block on the request queue waiting for IO dispatch. To avoid
- * this problem, we lock and submit buffers in groups of 50, thereby minimising
- * the lock hold times for lists which may contain thousands of objects.
- *
- * To do this, we sort the buffer list before we walk the list to lock and
- * submit buffers, and we plug and unplug around each group of buffers we
- * submit.
+ * Submit buffers for write. If wait_list is specified, the buffers are
+ * submitted using sync I/O and placed on the wait list such that the caller can
+ * iowait each buffer. Otherwise async I/O is used and the buffers are released
+ * at I/O completion time. In either case, buffers remain locked until I/O
+ * completes and the buffer is released from the queue.
  */
 static int
 xfs_buf_delwri_submit_buffers(
@@ -2012,21 +2016,22 @@ xfs_buf_delwri_submit_buffers(
 		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 
 		/*
-		 * We do all IO submission async. This means if we need
-		 * to wait for IO completion we need to take an extra
-		 * reference so the buffer is still valid on the other
-		 * side. We need to move the buffer onto the io_list
-		 * at this point so the caller can still access it.
+		 * If we have a wait list, each buffer (and associated delwri
+		 * queue reference) transfers to it and is submitted
+		 * synchronously. Otherwise, drop the buffer from the delwri
+		 * queue and submit async.
 		 */
 		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
-		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
+		bp->b_flags |= XBF_WRITE;
 		if (wait_list) {
-			xfs_buf_hold(bp);
+			bp->b_flags &= ~XBF_ASYNC;
 			list_move_tail(&bp->b_list, wait_list);
-		} else
+			__xfs_buf_submit(bp);
+		} else {
+			bp->b_flags |= XBF_ASYNC;
 			list_del_init(&bp->b_list);
-
-		xfs_buf_submit(bp);
+			xfs_buf_submit(bp);
+		}
 	}
 	blk_finish_plug(&plug);
 
@@ -2073,9 +2078,11 @@ xfs_buf_delwri_submit(
 
 		list_del_init(&bp->b_list);
 
-		/* locking the buffer will wait for async IO completion. */
-		xfs_buf_lock(bp);
-		error2 = bp->b_error;
+		/*
+		 * Wait on the locked buffer, check for errors and unlock and
+		 * release the delwri queue reference.
+		 */
+		error2 = xfs_buf_iowait(bp);
 		xfs_buf_relse(bp);
 		if (!error)
 			error = error2;
@@ -2121,23 +2128,18 @@ xfs_buf_delwri_pushbuf(
 
 	/*
 	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
-	 * the buffer on the wait list with an associated reference. Rather than
+	 * the buffer on the wait list with the original reference. Rather than
 	 * bounce the buffer from a local wait list back to the original list
 	 * after I/O completion, reuse the original list as the wait list.
 	 */
 	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
 
 	/*
-	 * The buffer is now under I/O and wait listed as during typical delwri
-	 * submission. Lock the buffer to wait for I/O completion. Rather than
-	 * remove the buffer from the wait list and release the reference, we
-	 * want to return with the buffer queued to the original list. The
-	 * buffer already sits on the original list with a wait list reference,
-	 * however. If we let the queue inherit that wait list reference, all we
-	 * need to do is reset the DELWRI_Q flag.
+	 * The buffer is now locked, under I/O and wait listed on the original
+	 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
+	 * return with the buffer unlocked and on the original queue.
 	 */
-	xfs_buf_lock(bp);
-	error = bp->b_error;
+	error = xfs_buf_iowait(bp);
 	bp->b_flags |= _XBF_DELWRI_Q;
 	xfs_buf_unlock(bp);