@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }
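The added guard is an instance of a general batching pattern: do work inside a
long critical section, but periodically drop and immediately re-take the lock
(here, re-enable IRQs) so latency-sensitive events are not starved. Below is a
minimal userspace sketch of that pattern, under stated assumptions: the
irq_save()/irq_restore() stubs and BATCH_MAX are hypothetical stand-ins for
local_irq_save()/local_irq_restore() and SWAP_CLUSTER_MAX; this illustrates
the idea only and is not kernel code.

/* Sketch of the periodic-IRQ-enable batching pattern (hypothetical names). */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 32	/* stand-in for SWAP_CLUSTER_MAX */

struct item {
	int id;
	struct item *next;
};

/* Stubs modelling the IRQ-disabled critical section boundaries. */
static void irq_save(void)    { printf("IRQs off\n"); }
static void irq_restore(void) { printf("IRQs on\n"); }

/* Work that must run with "IRQs disabled" in the real code. */
static void free_item_locked(struct item *it)
{
	free(it);
}

static void free_item_list(struct item *head)
{
	int batch_count = 0;

	irq_save();
	while (head) {
		struct item *next = head->next;

		free_item_locked(head);
		head = next;

		/*
		 * Guard against an excessively long "IRQ disabled" stretch:
		 * briefly re-enable after every BATCH_MAX items, exactly as
		 * the patch does with SWAP_CLUSTER_MAX.
		 */
		if (++batch_count == BATCH_MAX) {
			irq_restore();
			batch_count = 0;
			irq_save();
		}
	}
	irq_restore();
}

int main(void)
{
	struct item *head = NULL;

	/* Build a list long enough to trigger several batch breaks. */
	for (int i = 0; i < 100; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->id = i;
		it->next = head;
		head = it;
	}

	free_item_list(head);
	return 0;
}

Note the window between irq_restore() and irq_save() is deliberately empty:
the point is only to give pending interrupts a chance to run, after which the
loop resumes with the batch counter reset.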