@@ -724,7 +724,7 @@ struct page_referenced_arg {
 /*
  * arg: page_referenced_arg will be passed
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, void *arg)
 {
 	struct page_referenced_arg *pra = arg;
@@ -741,7 +741,7 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (vma->vm_flags & VM_LOCKED) {
 			page_vma_mapped_walk_done(&pvmw);
 			pra->vm_flags |= VM_LOCKED;
-			return SWAP_FAIL; /* To break the loop */
+			return false; /* To break the loop */
 		}
 
 		if (pvmw.pte) {
@@ -781,9 +781,9 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	if (!pra->mapcount)
-		return SWAP_SUCCESS; /* To break the loop */
+		return false; /* To break the loop */
 
-	return SWAP_AGAIN;
+	return true;
 }
 
 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
@@ -854,7 +854,7 @@ int page_referenced(struct page *page,
 	return pra.referenced;
 }
 
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
 	struct page_vma_mapped_walk pvmw = {
@@ -907,7 +907,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 	}
 
-	return SWAP_AGAIN;
+	return true;
 }
 
 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
@@ -1290,7 +1290,7 @@ void page_remove_rmap(struct page *page, bool compound)
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -1301,12 +1301,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	};
 	pte_t pteval;
 	struct page *subpage;
-	int ret = SWAP_AGAIN;
+	bool ret = true;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-		return SWAP_AGAIN;
+		return true;
 
 	if (flags & TTU_SPLIT_HUGE_PMD) {
 		split_huge_pmd_address(vma, address,
@@ -1329,7 +1329,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			mlock_vma_page(page);
 		}
-		ret = SWAP_FAIL;
+		ret = false;
 		page_vma_mapped_walk_done(&pvmw);
 		break;
 	}
@@ -1347,7 +1347,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (!(flags & TTU_IGNORE_ACCESS)) {
 			if (ptep_clear_flush_young_notify(vma, address,
 						pvmw.pte)) {
-				ret = SWAP_FAIL;
+				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1437,14 +1437,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			set_pte_at(mm, address, pvmw.pte, pteval);
 			SetPageSwapBacked(page);
-			ret = SWAP_FAIL;
+			ret = false;
 			page_vma_mapped_walk_done(&pvmw);
 			break;
 		}
 
 		if (swap_duplicate(entry) < 0) {
 			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = SWAP_FAIL;
+			ret = false;
 			page_vma_mapped_walk_done(&pvmw);
 			break;
 		}
@@ -1636,7 +1636,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(page, vma, address, rwc->arg))
 			break;
 		if (rwc->done && rwc->done(page))
 			break;
@@ -1690,7 +1690,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(page, vma, address, rwc->arg))
 			goto done;
 		if (rwc->done && rwc->done(page))
 			goto done;