@@ -886,45 +886,48 @@ struct page_referenced_arg {
 static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                         unsigned long address, void *arg)
 {
-        struct mm_struct *mm = vma->vm_mm;
         struct page_referenced_arg *pra = arg;
-        pmd_t *pmd;
-        pte_t *pte;
-        spinlock_t *ptl;
+        struct page_vma_mapped_walk pvmw = {
+                .page = page,
+                .vma = vma,
+                .address = address,
+        };
         int referenced = 0;

-        if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
-                return SWAP_AGAIN;
+        while (page_vma_mapped_walk(&pvmw)) {
+                address = pvmw.address;

-        if (vma->vm_flags & VM_LOCKED) {
-                if (pte)
-                        pte_unmap(pte);
-                spin_unlock(ptl);
-                pra->vm_flags |= VM_LOCKED;
-                return SWAP_FAIL; /* To break the loop */
-        }
+                if (vma->vm_flags & VM_LOCKED) {
+                        page_vma_mapped_walk_done(&pvmw);
+                        pra->vm_flags |= VM_LOCKED;
+                        return SWAP_FAIL; /* To break the loop */
+                }

-        if (pte) {
-                if (ptep_clear_flush_young_notify(vma, address, pte)) {
-                        /*
-                         * Don't treat a reference through a sequentially read
-                         * mapping as such. If the page has been used in
-                         * another mapping, we will catch it; if this other
-                         * mapping is already gone, the unmap path will have
-                         * set PG_referenced or activated the page.
-                         */
-                        if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                if (pvmw.pte) {
+                        if (ptep_clear_flush_young_notify(vma, address,
+                                                pvmw.pte)) {
+                                /*
+                                 * Don't treat a reference through
+                                 * a sequentially read mapping as such.
+                                 * If the page has been used in another mapping,
+                                 * we will catch it; if this other mapping is
+                                 * already gone, the unmap path will have set
+                                 * PG_referenced or activated the page.
+                                 */
+                                if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+                                        referenced++;
+                        }
+                } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+                        if (pmdp_clear_flush_young_notify(vma, address,
+                                                pvmw.pmd))
                                 referenced++;
+                } else {
+                        /* unexpected pmd-mapped page? */
+                        WARN_ON_ONCE(1);
                 }
-                pte_unmap(pte);
-        } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-                if (pmdp_clear_flush_young_notify(vma, address, pmd))
-                        referenced++;
-        } else {
-                /* unexpected pmd-mapped page? */
-                WARN_ON_ONCE(1);
+
+                pra->mapcount--;
         }
-        spin_unlock(ptl);

         if (referenced)
                 clear_page_idle(page);
@@ -936,7 +939,6 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                 pra->vm_flags |= vma->vm_flags;
         }

-        pra->mapcount--;
         if (!pra->mapcount)
                 return SWAP_SUCCESS; /* To break the loop */

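
For reference, the patch swaps the one-shot page_check_address_transhuge() lookup for the page_vma_mapped_walk() iterator: the caller fills a struct page_vma_mapped_walk with the page, the vma, and the start address, then loops; each successful iteration leaves either pvmw.pte or pvmw.pmd pointing at a mapped, locked entry, and page_vma_mapped_walk_done() releases the map/lock when bailing out of the loop early (as the VM_LOCKED branch above does). The sketch below only illustrates that calling pattern under those assumptions; it is kernel-internal code, not a standalone program, the function name is made up for illustration, and the loop body is a placeholder rather than part of the patch.

/* Hypothetical helper sketching the page_vma_mapped_walk() calling pattern. */
static void walk_page_mappings_sketch(struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,           /* page whose mappings we want */
                .vma = vma,             /* VMA to search */
                .address = address,     /* address of the mapping in that VMA */
        };

        /* Each iteration finds one mapping and takes the matching PTE/PMD lock. */
        while (page_vma_mapped_walk(&pvmw)) {
                if (pvmw.pte) {
                        /* page is mapped by a PTE at pvmw.address */
                } else {
                        /* page is mapped by a huge PMD; use pvmw.pmd */
                }

                /*
                 * To stop before the walk is exhausted, drop the current
                 * map/lock first:
                 *
                 *        page_vma_mapped_walk_done(&pvmw);
                 *        return;
                 */
        }
}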