@@ -494,30 +494,6 @@ allocate_blocks:
 	return 0;
 }
 
-STATIC void
-xfs_start_page_writeback(
-	struct page		*page,
-	int			clear_dirty)
-{
-	ASSERT(PageLocked(page));
-	ASSERT(!PageWriteback(page));
-
-	/*
-	 * if the page was not fully cleaned, we need to ensure that the higher
-	 * layers come back to it correctly. That means we need to keep the page
-	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
-	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
-	 * write this page in this writeback sweep will be made.
-	 */
-	if (clear_dirty) {
-		clear_page_dirty_for_io(page);
-		set_page_writeback(page);
-	} else
-		set_page_writeback_keepwrite(page);
-
-	unlock_page(page);
-}
-
 /*
  * Submit the bio for an ioend. We are passed an ioend with a bio attached to
  * it, and we submit that bio. The ioend may be used for multiple bio
@@ -858,6 +834,8 @@ xfs_writepage_map(
 	}
 
 	ASSERT(wpc->ioend || list_empty(&submit_list));
+	ASSERT(PageLocked(page));
+	ASSERT(!PageWriteback(page));
 
 	/*
 	 * On error, we have to fail the ioend here because we have locked
@@ -877,7 +855,21 @@ xfs_writepage_map(
 	 * treated correctly on error.
 	 */
 	if (count) {
-		xfs_start_page_writeback(page, !error);
+		/*
+		 * If the page was not fully cleaned, we need to ensure that the
+		 * higher layers come back to it correctly. That means we need
+		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+		 * so another attempt to write this page in this writeback sweep
+		 * will be made.
+		 */
+		if (error) {
+			set_page_writeback_keepwrite(page);
+		} else {
+			clear_page_dirty_for_io(page);
+			set_page_writeback(page);
+		}
+		unlock_page(page);
 
 		/*
 		 * Preserve the original error if there was one, otherwise catch
@@ -902,7 +894,9 @@ xfs_writepage_map(
 		 * race with a partial page truncate on a sub-page block sized
 		 * filesystem. In that case we need to mark the page clean.
 		 */
-		xfs_start_page_writeback(page, 1);
+		clear_page_dirty_for_io(page);
+		set_page_writeback(page);
+		unlock_page(page);
 		end_page_writeback(page);
 	}
 