@@ -523,6 +523,17 @@ try_again:
 	total_bytes = head->wb_bytes;
 	for (subreq = head->wb_this_page; subreq != head;
 			subreq = subreq->wb_this_page) {
+		if (!nfs_lock_request(subreq)) {
+			/* releases page group bit lock and
+			 * inode spin lock and all references */
+			ret = nfs_unroll_locks_and_wait(inode, head,
+					subreq);
+
+			if (ret == 0)
+				goto try_again;
+
+			return ERR_PTR(ret);
+		}
 		/* Subrequests are always contiguous, non overlapping
 		 * and in order - but may be repeated (mirrored writes).
		 */
@@ -533,21 +544,10 @@ try_again:
 		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
 			    ((subreq->wb_offset + subreq->wb_bytes) >
 			     (head->wb_offset + total_bytes)))) {
+			nfs_unlock_request(subreq);
 			nfs_unroll_locks_and_wait(inode, head, subreq);
 			return ERR_PTR(-EIO);
 		}
-
-		if (!nfs_lock_request(subreq)) {
-			/* releases page group bit lock and
-			 * inode spin lock and all references */
-			ret = nfs_unroll_locks_and_wait(inode, head,
-					subreq);
-
-			if (ret == 0)
-				goto try_again;
-
-			return ERR_PTR(ret);
-		}
 	}

 	/* Now that all requests are locked, make sure they aren't on any list.