@@ -2328,8 +2328,14 @@ static struct page
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
+	gfp_t flags;
+
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
+	/* Only allocate from the target node */
+	flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+		__GFP_THISNODE;
+
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -2338,8 +2344,7 @@ static struct page
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
-		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
+	*hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
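
For readers outside mm/, a minimal userspace sketch of the gfp-mask composition the first hunk introduces. The bit values below are made-up stand-ins (the real ones live in include/linux/gfp.h), and this alloc_hugepage_gfpmask() only mirrors the shape of the kernel helper of that era (drop __GFP_WAIT unless defrag is enabled); it is an illustration under those assumptions, not the kernel implementation. The substance of the patch is the final OR with __GFP_THISNODE, which forbids the allocator from falling back to any node other than the one khugepaged chose, instead of merely preferring it.

	#include <stdio.h>

	typedef unsigned int gfp_t;

	/* Stand-in bit values for illustration only; not the real GFP bits. */
	#define __GFP_WAIT       0x2u
	#define __GFP_OTHER_NODE 0x4u
	#define __GFP_THISNODE   0x8u
	#define GFP_TRANSHUGE    (0x1u | __GFP_WAIT) /* base THP mask, includes __GFP_WAIT */

	/* Sketch of the helper: strip __GFP_WAIT when defrag is off, OR in extras. */
	static gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}

	int main(void)
	{
		/* After the patch: the mask is built once, pinned to the target node. */
		gfp_t flags = alloc_hugepage_gfpmask(1, __GFP_OTHER_NODE) | __GFP_THISNODE;

		printf("defrag on : flags = %#x\n", flags);
		printf("defrag off: flags = %#x\n",
		       alloc_hugepage_gfpmask(0, __GFP_OTHER_NODE) | __GFP_THISNODE);
		return 0;
	}

Either way the node-pinning bit survives into the mask handed to alloc_pages_exact_node(), which is what keeps the collapsed hugepage local to the node the small pages came from.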