@@ -4839,7 +4839,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 					unsigned long addr, unsigned long end,
 					struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 
@@ -4865,20 +4865,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 {
 	unsigned long precharge;
-	struct vm_area_struct *vma;
+	struct mm_walk mem_cgroup_count_precharge_walk = {
+		.pmd_entry = mem_cgroup_count_precharge_pte_range,
+		.mm = mm,
+	};
 
 	down_read(&mm->mmap_sem);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct mm_walk mem_cgroup_count_precharge_walk = {
-			.pmd_entry = mem_cgroup_count_precharge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		walk_page_range(vma->vm_start, vma->vm_end,
-					&mem_cgroup_count_precharge_walk);
-	}
+	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
 	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
@@ -5011,7 +5004,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 				struct mm_walk *walk)
 {
 	int ret = 0;
-	struct vm_area_struct *vma = walk->private;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *pte;
 	spinlock_t *ptl;
 	enum mc_target_type target_type;
@@ -5107,7 +5100,10 @@ put:			/* get_mctgt_type() gets the page */
 
 static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
-	struct vm_area_struct *vma;
+	struct mm_walk mem_cgroup_move_charge_walk = {
+		.pmd_entry = mem_cgroup_move_charge_pte_range,
+		.mm = mm,
+	};
 
 	lru_add_drain_all();
 	/*
@@ -5130,24 +5126,11 @@ retry:
 		cond_resched();
 		goto retry;
 	}
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		int ret;
-		struct mm_walk mem_cgroup_move_charge_walk = {
-			.pmd_entry = mem_cgroup_move_charge_pte_range,
-			.mm = mm,
-			.private = vma,
-		};
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		ret = walk_page_range(vma->vm_start, vma->vm_end,
-						&mem_cgroup_move_charge_walk);
-		if (ret)
-			/*
-			 * means we have consumed all precharges and failed in
-			 * doing additional charge. Just abandon here.
-			 */
-			break;
-	}
+	/*
+	 * When we have consumed all precharges and failed in doing
+	 * additional charge, the page walk just aborts.
+	 */
+	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
 	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
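
For reference, a minimal sketch (not part of the patch) of the calling convention this change moves to: the walk core now fills in walk->vma for each VMA it visits, so ->private is free to carry caller data, one walk_page_range(0, ~0UL, ...) call covers the whole address space, and hugetlb VMAs are skipped by the core when no ->hugetlb_entry callback is supplied, which is why the explicit is_vm_hugetlb_page() checks can be dropped above. The names example_pte_range and example_count_present are made up for illustration:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Count present ptes in an address space.  Mirrors the structure of
 * mem_cgroup_count_precharge_pte_range() above; real callers also
 * handle transparent huge pmds first (pmd_trans_huge_lock()), which
 * is omitted here for brevity.
 */
static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;	/* set by the walk core */
	unsigned long *count = walk->private;	/* now free for caller data */
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			(*count)++;
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;	/* a nonzero return aborts the whole walk */
}

static unsigned long example_count_present(struct mm_struct *mm)
{
	unsigned long count = 0;
	struct mm_walk example_walk = {
		.pmd_entry	= example_pte_range,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(0, ~0UL, &example_walk);
	up_read(&mm->mmap_sem);
	return count;
}

The nonzero-return semantics are what the last hunk relies on: when mem_cgroup_move_charge_pte_range() has consumed all precharges and cannot charge more, its error return makes the walk abort, replacing the old explicit `if (ret) break;` in the per-VMA loop.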