@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
  * value (scan code).
  */
 
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+		struct vm_area_struct **vmap)
 {
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
 	if (unlikely(khugepaged_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
-	vma = find_vma(mm, address);
+	*vmap = vma = find_vma(mm, address);
 	if (!vma)
 		return SCAN_VMA_NULL;
 
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.pmd = pmd,
 	};
 
+	/* we only decide to swapin, if there is enough young ptes */
+	if (referenced < HPAGE_PMD_NR/2) {
+		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+		return false;
+	}
 	fe.pte = pte_offset_map(pmd, address);
 	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 			fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		if (!is_swap_pte(pteval))
 			continue;
 		swapped_in++;
-		/* we only decide to swapin, if there is enough young ptes */
-		if (referenced < HPAGE_PMD_NR/2) {
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-			return false;
-		}
 		ret = do_swap_page(&fe, pteval);
 
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
 			down_read(&mm->mmap_sem);
-			if (hugepage_vma_revalidate(mm, address)) {
+			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
-				   struct vm_area_struct *vma,
 				   int node, int referenced)
 {
 	pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int isolated = 0, result = 0;
 	struct mem_cgroup *memcg;
+	struct vm_area_struct *vma;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 	gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	}
 
 	down_read(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
-	result = hugepage_vma_revalidate(mm, address);
+	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
 	/* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
 	if (ret) {
 		node = khugepaged_find_target_node();
 		/* collapse_huge_page will return with the mmap_sem released */
-		collapse_huge_page(mm, address, hpage, vma, node, referenced);
+		collapse_huge_page(mm, address, hpage, node, referenced);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,