@@ -916,6 +916,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	bool unmap_success;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
+	bool mlocked = PageMlocked(hpage);
 
 	/*
 	 * Here we are interested only in user-mapped pages, so skip any
|
@@ -979,6 +980,13 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
 
+	/*
+	 * try_to_unmap() might put mlocked page in lru cache, so call
+	 * shake_page() again to ensure that it's flushed.
+	 */
+	if (mlocked)
+		shake_page(hpage, 0);
+
 	/*
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if