@@ -138,7 +138,7 @@ static void add_extent_changeset(struct extent_state *state, unsigned bits,
 	BUG_ON(ret < 0);
 }
 
-static void flush_write_bio(void *data);
+static void flush_write_bio(struct extent_page_data *epd);
 
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
@@ -3456,10 +3456,9 @@ done:
  * and the end_io handler clears the writeback ranges
  */
 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
-			      void *data)
+			      struct extent_page_data *epd)
 {
 	struct inode *inode = page->mapping->host;
-	struct extent_page_data *epd = data;
 	u64 start = page_offset(page);
 	u64 page_end = start + PAGE_SIZE - 1;
 	int ret;
@@ -3905,7 +3904,7 @@ retry:
  */
 static int extent_write_cache_pages(struct address_space *mapping,
 				    struct writeback_control *wbc,
-				    void *data)
+				    struct extent_page_data *epd)
 {
 	struct inode *inode = mapping->host;
 	int ret = 0;
@@ -3969,7 +3968,7 @@ retry:
 		 * mapping
 		 */
 		if (!trylock_page(page)) {
-			flush_write_bio(data);
+			flush_write_bio(epd);
 			lock_page(page);
 		}
 
@@ -3980,7 +3979,7 @@ retry:
 
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			if (PageWriteback(page))
-				flush_write_bio(data);
+				flush_write_bio(epd);
 			wait_on_page_writeback(page);
 		}
 
@@ -3990,7 +3989,7 @@ retry:
 			continue;
 		}
 
-		ret = __extent_writepage(page, wbc, data);
+		ret = __extent_writepage(page, wbc, epd);
 
 		if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
 			unlock_page(page);
@@ -4038,10 +4037,8 @@ retry:
 	return ret;
 }
 
-static void flush_write_bio(void *data)
+static void flush_write_bio(struct extent_page_data *epd)
 {
-	struct extent_page_data *epd = data;
-
 	if (epd->bio) {
 		int ret;
 
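The net effect of the hunks above is that the writepage helpers take a typed struct extent_page_data * instead of an opaque void *data, so each callee drops its local cast and the compiler can reject a mismatched argument. Below is a minimal, self-contained userspace sketch of the same refactor pattern; the identifiers (page_batch, queue_page, flush_batch) are hypothetical stand-ins for illustration, not btrfs code.

/*
 * Illustrative sketch only: the "void *ctx" -> typed-pointer conversion
 * that the patch applies to flush_write_bio() and its callers.
 * All names here are made up for the example.
 */
#include <stdio.h>

struct page_batch {
	int pending;	/* pages queued but not yet submitted */
};

/*
 * Old style (what the patch removes): the callee casts and simply trusts
 * that the caller really passed a struct page_batch:
 *
 *	static void flush_batch(void *ctx)
 *	{
 *		struct page_batch *b = ctx;
 *		...
 *	}
 */

/* New style: the parameter carries its real type, no cast needed. */
static void flush_batch(struct page_batch *b)
{
	if (b->pending) {
		printf("flushing %d queued pages\n", b->pending);
		b->pending = 0;
	}
}

static void queue_page(int page_no, struct page_batch *b)
{
	b->pending++;
	printf("queued page %d\n", page_no);
}

int main(void)
{
	struct page_batch batch = { .pending = 0 };
	int i;

	for (i = 0; i < 3; i++)
		queue_page(i, &batch);

	/*
	 * Passing anything other than a struct page_batch * is now a
	 * compile-time error instead of a silent bad cast at runtime.
	 */
	flush_batch(&batch);
	return 0;
}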