@@ -447,58 +447,91 @@ struct mem_size_stats {
 	u64 pss;
 };
 
+static void smaps_account(struct mem_size_stats *mss, struct page *page,
+		unsigned long size, bool young, bool dirty)
+{
+	int mapcount;
+
+	if (PageAnon(page))
+		mss->anonymous += size;
 
-static void smaps_pte_entry(pte_t ptent, unsigned long addr,
-		unsigned long ptent_size, struct mm_walk *walk)
+	mss->resident += size;
+	/* Accumulate the size in pages that have been accessed. */
+	if (young || PageReferenced(page))
+		mss->referenced += size;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		u64 pss_delta;
+
+		if (dirty || PageDirty(page))
+			mss->shared_dirty += size;
+		else
+			mss->shared_clean += size;
+		pss_delta = (u64)size << PSS_SHIFT;
+		do_div(pss_delta, mapcount);
+		mss->pss += pss_delta;
+	} else {
+		if (dirty || PageDirty(page))
+			mss->private_dirty += size;
+		else
+			mss->private_clean += size;
+		mss->pss += (u64)size << PSS_SHIFT;
+	}
+}
+
+static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+		struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
 	pgoff_t pgoff = linear_page_index(vma, addr);
 	struct page *page = NULL;
-	int mapcount;
 
-	if (pte_present(ptent)) {
-		page = vm_normal_page(vma, addr, ptent);
-	} else if (is_swap_pte(ptent)) {
-		swp_entry_t swpent = pte_to_swp_entry(ptent);
+	if (pte_present(*pte)) {
+		page = vm_normal_page(vma, addr, *pte);
+	} else if (is_swap_pte(*pte)) {
+		swp_entry_t swpent = pte_to_swp_entry(*pte);
 
 		if (!non_swap_entry(swpent))
-			mss->swap += ptent_size;
+			mss->swap += PAGE_SIZE;
 		else if (is_migration_entry(swpent))
 			page = migration_entry_to_page(swpent);
-	} else if (pte_file(ptent)) {
-		if (pte_to_pgoff(ptent) != pgoff)
-			mss->nonlinear += ptent_size;
+	} else if (pte_file(*pte)) {
+		if (pte_to_pgoff(*pte) != pgoff)
+			mss->nonlinear += PAGE_SIZE;
 	}
 
 	if (!page)
 		return;
 
-	if (PageAnon(page))
-		mss->anonymous += ptent_size;
-
 	if (page->index != pgoff)
-		mss->nonlinear += ptent_size;
+		mss->nonlinear += PAGE_SIZE;
 
-	mss->resident += ptent_size;
-	/* Accumulate the size in pages that have been accessed. */
-	if (pte_young(ptent) || PageReferenced(page))
-		mss->referenced += ptent_size;
-	mapcount = page_mapcount(page);
-	if (mapcount >= 2) {
-		if (pte_dirty(ptent) || PageDirty(page))
-			mss->shared_dirty += ptent_size;
-		else
-			mss->shared_clean += ptent_size;
-		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
-	} else {
-		if (pte_dirty(ptent) || PageDirty(page))
-			mss->private_dirty += ptent_size;
-		else
-			mss->private_clean += ptent_size;
-		mss->pss += (ptent_size << PSS_SHIFT);
-	}
+	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+		struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	struct page *page;
+
+	/* FOLL_DUMP will return -EFAULT on huge zero page */
+	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+	if (IS_ERR_OR_NULL(page))
+		return;
+	mss->anonymous_thp += HPAGE_PMD_SIZE;
+	smaps_account(mss, page, HPAGE_PMD_SIZE,
+			pmd_young(*pmd), pmd_dirty(*pmd));
 }
+#else
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+		struct mm_walk *walk)
+{
+}
+#endif
 
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		struct mm_walk *walk)
@@ -509,9 +542,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	spinlock_t *ptl;
 
 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
+		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
-		mss->anonymous_thp += HPAGE_PMD_SIZE;
 		return 0;
 	}
 
@@ -524,7 +556,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
-		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
+		smaps_pte_entry(pte, addr, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
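
For reference, the PSS arithmetic that smaps_account() now centralizes keeps the per-page contribution as a fixed-point value: a page shared by mapcount processes is charged (size << PSS_SHIFT) / mapcount, so the fractional part survives accumulation and is only shifted back down when the totals are reported. Below is a minimal userspace sketch of that calculation (an editor's illustration, not part of the patch), assuming PSS_SHIFT is 12 as defined elsewhere in fs/proc/task_mmu.c and using a plain 64-bit division where the kernel uses do_div():

	#include <stdio.h>
	#include <stdint.h>

	#define PSS_SHIFT 12	/* fixed-point shift used for the pss accumulator */

	int main(void)
	{
		uint64_t pss = 0;
		unsigned long size = 4096;	/* one 4 KiB page */
		int mapcount = 3;		/* page mapped by three processes */

		/*
		 * Shared page: charge size / mapcount, but keep the value
		 * scaled by PSS_SHIFT so the remainder is not lost per page.
		 * smaps_account() performs this division with do_div().
		 */
		uint64_t pss_delta = (uint64_t)size << PSS_SHIFT;
		pss_delta /= mapcount;
		pss += pss_delta;

		/* Shift back down only when reporting: prints 1365 (bytes). */
		printf("pss = %llu bytes\n",
		       (unsigned long long)(pss >> PSS_SHIFT));
		return 0;
	}

A privately mapped page (mapcount < 2) takes the else branch and contributes its full size, size << PSS_SHIFT, to the accumulator.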