@@ -97,6 +97,7 @@
 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
 #include <linux/printk.h>
+#include <linux/swapops.h>
 
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
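The swapops.h include added above is what provides the migration-entry
helpers used by the new queue_pages_pmd() below. For reference,
is_pmd_migration_entry() is roughly the following (paraphrased from
include/linux/swapops.h as of this series; check your tree):

        static inline int is_pmd_migration_entry(pmd_t pmd)
        {
                return !pmd_present(pmd) &&
                        is_migration_entry(pmd_to_swp_entry(pmd));
        }

i.e. a huge PMD whose contents are in flight is marked by a non-present
migration swap entry.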
@@ -426,6 +427,49 @@ static inline bool queue_pages_required(struct page *page,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	int ret = 0;
+	struct page *page;
+	struct queue_pages *qp = walk->private;
+	unsigned long flags;
+
+	if (unlikely(is_pmd_migration_entry(*pmd))) {
+		ret = 1;
+		goto unlock;
+	}
+	page = pmd_page(*pmd);
+	if (is_huge_zero_page(page)) {
+		spin_unlock(ptl);
+		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		goto out;
+	}
+	if (!thp_migration_supported()) {
+		get_page(page);
+		spin_unlock(ptl);
+		lock_page(page);
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+		goto out;
+	}
+	if (!queue_pages_required(page, qp)) {
+		ret = 1;
+		goto unlock;
+	}
+
+	ret = 1;
+	flags = qp->flags;
+	/* go to thp migration */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		migrate_page_add(page, qp->pagelist, flags);
+unlock:
+	spin_unlock(ptl);
+out:
+	return ret;
+}
+
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
@@ -437,30 +481,15 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid, ret;
+	int ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(walk->mm, pmd);
-		if (pmd_trans_huge(*pmd)) {
-			page = pmd_page(*pmd);
-			if (is_huge_zero_page(page)) {
-				spin_unlock(ptl);
-				__split_huge_pmd(vma, pmd, addr, false, NULL);
-			} else {
-				get_page(page);
-				spin_unlock(ptl);
-				lock_page(page);
-				ret = split_huge_page(page);
-				unlock_page(page);
-				put_page(page);
-				if (ret)
-					return 0;
-			}
-		} else {
-			spin_unlock(ptl);
-		}
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+		if (ret)
+			return 0;
 	}
 
 	if (pmd_trans_unstable(pmd))
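The contract between queue_pages_pmd() and this caller is worth
spelling out; the summary below is annotation, not patch content:

        /*
         * queue_pages_pmd() return value, as consumed above:
         *   0        - the huge PMD was split (huge zero page, or THP
         *              migration unsupported); fall through to the
         *              pte-level scan below
         *   non-zero - nothing left to do at pte level: the THP was
         *              queued whole, rejected by the nodemask, was
         *              already a migration entry, or split_huge_page()
         *              failed
         */

Note that pmd_trans_huge_lock() also replaces the old open-coded
lock-then-recheck dance: it returns the held ptl only when the PMD is
still in a huge state (including a PMD migration entry, hence the
is_pmd_migration_entry() check at the top of the helper).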
@@ -481,7 +510,7 @@ retry:
 			continue;
 		if (!queue_pages_required(page, qp))
 			continue;
-		if (PageTransCompound(page)) {
+		if (PageTransCompound(page) && !thp_migration_supported()) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
@@ -893,19 +922,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 #ifdef CONFIG_MIGRATION
 /*
- * page migration
+ * page migration, thp tail pages can be passed.
  */
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	struct page *head = compound_head(page);
 	/*
 	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-		if (!isolate_lru_page(page)) {
-			list_add_tail(&page->lru, pagelist);
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					    page_is_file_cache(page));
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
+		if (!isolate_lru_page(head)) {
+			list_add_tail(&head->lru, pagelist);
+			mod_node_page_state(page_pgdat(head),
+				NR_ISOLATED_ANON + page_is_file_cache(head),
+				hpage_nr_pages(head));
 		}
 	}
 }
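With compound_head() taken up front, a pte-mapped THP tail can now be
passed in safely, and the switch from inc_node_page_state() to
mod_node_page_state() credits NR_ISOLATED_* with the real number of
base pages isolated. hpage_nr_pages() is roughly (per
include/linux/huge_mm.h of this era; verify in your tree):

        static inline int hpage_nr_pages(struct page *page)
        {
                if (unlikely(PageTransHuge(page)))
                        return HPAGE_PMD_NR;    /* 512 with 4KB base pages */
                return 1;
        }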
@@ -915,7 +946,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
 					node);
-	else
+	else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_pages_node(node,
+			(GFP_TRANSHUGE | __GFP_THISNODE),
+			HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
+	} else
 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 						    __GFP_THISNODE, 0);
 }
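A behavioural note on the gfp mask above, assuming the stock flag
semantics rather than anything this patch introduces: __GFP_THISNODE
forbids falling back to other nodes, so the allocation either produces
an HPAGE_PMD_ORDER page on 'node' or returns NULL, in which case
migration of that particular THP fails instead of landing it on the
wrong node. Annotated:

        /* annotation only -- assumed stock gfp semantics */
        thp = alloc_pages_node(node,
                (GFP_TRANSHUGE | __GFP_THISNODE), /* no cross-node fallback */
                HPAGE_PMD_ORDER);                 /* 2MB with 4KB base pages */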
@@ -1081,6 +1122,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	if (PageHuge(page)) {
 		BUG_ON(!vma);
 		return alloc_huge_page_noerr(vma, address, 1);
+	} else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+					 HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
 	}
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy
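As a quick way to exercise the new path from userspace, the
hypothetical snippet below (standard mbind(2)/libnuma API; the test
itself is illustration, not part of the patch) rebinds a THP-backed
range with MPOL_MF_MOVE. With this series applied, AnonHugePages in
/proc/<pid>/smaps should stay non-zero across the call instead of
dropping to zero due to a split:

        #include <numaif.h>             /* mbind(); link with -lnuma */
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t len = 2UL << 20;                 /* one PMD-sized page */
                unsigned long nmask = 1UL << 1;         /* nodemask = {1} */
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return 1;
                madvise(p, len, MADV_HUGEPAGE);
                memset(p, 1, len);      /* fault in, ideally as one THP */

                if (mbind(p, len, MPOL_BIND, &nmask, 8 * sizeof(nmask),
                          MPOL_MF_MOVE))
                        perror("mbind");
                return 0;
        }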