@@ -178,7 +178,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 		set_buffer_uptodate(bh);
 	} else {
 		buffer_io_error(bh, ", lost sync page write");
-		set_buffer_write_io_error(bh);
+		mark_buffer_write_io_error(bh);
 		clear_buffer_uptodate(bh);
 	}
 	unlock_buffer(bh);
@@ -352,8 +352,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 		set_buffer_uptodate(bh);
 	} else {
 		buffer_io_error(bh, ", lost async page write");
-		mapping_set_error(page->mapping, -EIO);
-		set_buffer_write_io_error(bh);
+		mark_buffer_write_io_error(bh);
 		clear_buffer_uptodate(bh);
 		SetPageError(page);
 	}
@@ -481,8 +480,6 @@ static void __remove_assoc_queue(struct buffer_head *bh)
 {
 	list_del_init(&bh->b_assoc_buffers);
 	WARN_ON(!bh->b_assoc_map);
-	if (buffer_write_io_error(bh))
-		mapping_set_error(bh->b_assoc_map, -EIO);
 	bh->b_assoc_map = NULL;
 }
 
@@ -1181,6 +1178,17 @@ void mark_buffer_dirty(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(mark_buffer_dirty);
 
+void mark_buffer_write_io_error(struct buffer_head *bh)
+{
+	set_buffer_write_io_error(bh);
+	/* FIXME: do we need to set this in both places? */
+	if (bh->b_page && bh->b_page->mapping)
+		mapping_set_error(bh->b_page->mapping, -EIO);
+	if (bh->b_assoc_map)
+		mapping_set_error(bh->b_assoc_map, -EIO);
+}
+EXPORT_SYMBOL(mark_buffer_write_io_error);
+
 /*
  * Decrement a buffer_head's reference count. If all buffers against a page
  * have zero reference count, are clean and unlocked, and if the page is clean
@@ -3279,8 +3287,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 
 	bh = head;
 	do {
-		if (buffer_write_io_error(bh) && page->mapping)
-			mapping_set_error(page->mapping, -EIO);
 		if (buffer_busy(bh))
 			goto failed;
 		bh = bh->b_this_page;