@@ -1706,8 +1706,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page)
  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
  * Called by migrate.c to remove migration ptes, but might be used more later.
  */
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1721,7 +1720,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1729,8 +1728,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
 	pgoff_t pgoff = page->index << compound_order(page);
@@ -1742,7 +1740,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = rmap_one(page, vma, address, arg);
+		ret = rwc->rmap_one(page, vma, address, rwc->arg);
 		if (ret != SWAP_AGAIN)
 			break;
 	}
@@ -1755,17 +1753,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	return ret;
 }
 
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 {
 	VM_BUG_ON(!PageLocked(page));
 
 	if (unlikely(PageKsm(page)))
-		return rmap_walk_ksm(page, rmap_one, arg);
+		return rmap_walk_ksm(page, rwc);
 	else if (PageAnon(page))
-		return rmap_walk_anon(page, rmap_one, arg);
+		return rmap_walk_anon(page, rwc);
 	else
-		return rmap_walk_file(page, rmap_one, arg);
+		return rmap_walk_file(page, rwc);
 }
 #endif /* CONFIG_MIGRATION */
 