|
@@ -633,6 +633,22 @@ free_pages_out:
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
+/*
+ * free_async_extent_pages - drop the compressed pages held by an async_extent
+ * @async_extent: extent whose page array should be released
+ *
+ * Releases each page in async_extent->pages, frees the page-pointer array
+ * itself, and resets the bookkeeping fields (nr_pages, pages) so the struct
+ * can be safely reused or freed.  A no-op when no pages are attached.
+ */
+static void free_async_extent_pages(struct async_extent *async_extent)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ if (!async_extent->pages)
|
|
|
+ return;
|
|
|
+
|
|
|
+ for (i = 0; i < async_extent->nr_pages; i++) {
|
|
|
+ /* Pages are expected to be detached from any mapping here. */
+ WARN_ON(async_extent->pages[i]->mapping);
|
|
|
+ page_cache_release(async_extent->pages[i]);
|
|
|
+ }
|
|
|
+ kfree(async_extent->pages);
|
|
|
+ /* Reset so a later retry/free path sees no stale page references. */
+ async_extent->nr_pages = 0;
|
|
|
+ async_extent->pages = NULL;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* phase two of compressed writeback. This is the ordered portion
|
|
|
* of the code, which only gets called in the order the work was
|
|
@@ -709,15 +725,7 @@ retry:
|
|
|
async_extent->compressed_size,
|
|
|
0, alloc_hint, &ins, 1, 1);
|
|
|
if (ret) {
|
|
|
- int i;
|
|
|
-
|
|
|
- for (i = 0; i < async_extent->nr_pages; i++) {
|
|
|
- WARN_ON(async_extent->pages[i]->mapping);
|
|
|
- page_cache_release(async_extent->pages[i]);
|
|
|
- }
|
|
|
- kfree(async_extent->pages);
|
|
|
- async_extent->nr_pages = 0;
|
|
|
- async_extent->pages = NULL;
|
|
|
+ free_async_extent_pages(async_extent);
|
|
|
|
|
|
if (ret == -ENOSPC) {
|
|
|
unlock_extent(io_tree, async_extent->start,
|
|
@@ -827,6 +835,7 @@ retry:
|
|
|
extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
|
|
|
PAGE_END_WRITEBACK |
|
|
|
PAGE_SET_ERROR);
|
|
|
+ free_async_extent_pages(async_extent);
|
|
|
}
|
|
|
alloc_hint = ins.objectid + ins.offset;
|
|
|
kfree(async_extent);
|
|
@@ -848,6 +857,7 @@ out_free:
|
|
|
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
|
|
|
PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
|
|
|
PAGE_SET_ERROR);
|
|
|
+ free_async_extent_pages(async_extent);
|
|
|
kfree(async_extent);
|
|
|
goto again;
|
|
|
}
|