@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 	return page;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+					      struct vm_area_struct *vma,
+					      unsigned long addr)
+{
+	struct page *page;
+	int nid;
+
+	if (!pmd_present(pmd))
+		return NULL;
+
+	page = vm_normal_page_pmd(vma, addr, pmd);
+	if (!page)
+		return NULL;
+
+	if (PageReserved(page))
+		return NULL;
+
+	nid = page_to_nid(page);
+	if (!node_isset(nid, node_states[N_MEMORY]))
+		return NULL;
+
+	return page;
+}
+#endif
+
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		unsigned long end, struct mm_walk *walk)
 {
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
-		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
-		page = can_gather_numa_stats(huge_pte, vma, addr);
+		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
 		if (page)
-			gather_stats(page, md, pte_dirty(huge_pte),
+			gather_stats(page, md, pmd_dirty(*pmd),
 				HPAGE_PMD_SIZE/PAGE_SIZE);
 		spin_unlock(ptl);
 		return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
+#endif
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	do {
 		struct page *page = can_gather_numa_stats(*pte, vma, addr);