@@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
-			bool rsv_on_error;
 			u32 hash;
 
 			/*
@@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			 * cache (remove_huge_page) BEFORE removing the
 			 * region/reserve map (hugetlb_unreserve_pages). In
 			 * rare out of memory conditions, removal of the
-			 * region/reserve map could fail. Before free'ing
-			 * the page, note PagePrivate which is used in case
-			 * of error.
+			 * region/reserve map could fail. Correspondingly,
+			 * the subpool and global reserve usage count can need
+			 * to be adjusted.
 			 */
-			rsv_on_error = !PagePrivate(page);
+			VM_BUG_ON(PagePrivate(page));
 			remove_huge_page(page);
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
 							next, next + 1, 1)))
-					hugetlb_fix_reserve_counts(inode,
-								rsv_on_error);
+					hugetlb_fix_reserve_counts(inode);
 			}
 
 			unlock_page(page);
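
For context, the hunk swaps the saved rsv_on_error flag for a VM_BUG_ON() assertion of the invariant that pages reaching this point never have PagePrivate set, which in turn lets hugetlb_fix_reserve_counts() lose its second argument. What follows is only a hypothetical userspace sketch of that general refactoring pattern (assert the invariant, drop the now-constant parameter); the toy_page, fix_counts_old, fix_counts_new, and remove_one names are made up for illustration and are not kernel APIs.

/*
 * Hypothetical userspace sketch of the refactoring pattern used above:
 * instead of caching a boolean derived from a flag that is known to be
 * clear at this point and passing it down the error path, assert the
 * invariant and drop the parameter.  Names are illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool private_flag;	/* stands in for PagePrivate() */
};

/* Old shape: behaviour depended on a flag captured by the caller. */
static void fix_counts_old(bool restore_reserve)
{
	if (restore_reserve)
		puts("old: adjusting reserve counts");
}

/* New shape: the flag is known to be clear here, so no parameter. */
static void fix_counts_new(void)
{
	puts("new: adjusting reserve counts");
}

static void remove_one(struct toy_page *page, bool unreserve_failed)
{
	/*
	 * The invariant the VM_BUG_ON() documents: the flag is never set
	 * for pages handled on this path, so the old parameter was
	 * always true and carried no information.
	 */
	assert(!page->private_flag);

	if (unreserve_failed) {
		fix_counts_old(!page->private_flag);	/* old call shape */
		fix_counts_new();			/* simplified call */
	}
}

int main(void)
{
	struct toy_page page = { .private_flag = false };

	remove_one(&page, true);	/* simulate a failed unreserve */
	return 0;
}

Both call shapes behave identically whenever the invariant holds, which is exactly what the assertion documents.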