@@ -741,8 +741,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 	 * page->lru because it can be used in other hugepage operations,
 	 * such as __unmap_hugepage_range() and gather_surplus_pages().
 	 * So instead we use page_mapping() and PageAnon().
-	 * We assume that this function is called with page lock held,
-	 * so there is no race between isolation and mapping/unmapping.
 	 */
 	if (!(page_mapping(hpage) || PageAnon(hpage))) {
 		res = dequeue_hwpoisoned_huge_page(hpage);
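
For orientation, the hunk only drops the stale comment about the page lock; the in-use check itself is unchanged. A minimal sketch of how the surrounding me_huge_page() logic reads once the hunk applies, assuming the RECOVERED/DELAYED action codes used by mm/memory-failure.c of that era (the exact function body may differ between kernel versions):

static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);

	/*
	 * A hugepage with neither a file mapping (page_mapping()) nor
	 * the anon flag (PageAnon()) is treated as free or reserved,
	 * i.e. not in-use, so it can simply be pulled off the hugepage
	 * free list instead of being unmapped.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return RECOVERED;
	}
	return DELAYED;
}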