|
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t lock; /* for from, to */
+	struct mm_struct *mm;
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
+	struct mm_struct *mm = mc.mm;
+
 	/*
 	 * we must clear moving_task before waking up waiters at the end of
 	 * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
+	mc.mm = NULL;
 	spin_unlock(&mc.lock);
+
+	mmput(mm);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		VM_BUG_ON(mc.moved_swap);
 
 		spin_lock(&mc.lock);
+		mc.mm = mm;
 		mc.from = from;
 		mc.to = memcg;
 		mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
 			mem_cgroup_clear_mc();
+	} else {
+		mmput(mm);
 	}
-	mmput(mm);
 	return ret;
 }
@@ -4852,11 +4860,11 @@ put:		/* get_mctgt_type() gets the page */
 	return ret;
 }
 
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
 {
 	struct mm_walk mem_cgroup_move_charge_walk = {
 		.pmd_entry = mem_cgroup_move_charge_pte_range,
-		.mm = mm,
+		.mm = mc.mm,
 	};
 
 	lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
 retry:
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
 		/*
 		 * Someone who are holding the mmap_sem might be waiting in
 		 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
 	 * additional charge, the page walk just aborts.
 	 */
 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-	up_read(&mm->mmap_sem);
+	up_read(&mc.mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
-	struct cgroup_subsys_state *css;
-	struct task_struct *p = cgroup_taskset_first(tset, &css);
-	struct mm_struct *mm = get_task_mm(p);
-
-	if (mm) {
-		if (mc.to)
-			mem_cgroup_move_charge(mm);
-		mmput(mm);
-	}
-	if (mc.to)
+	if (mc.to) {
+		mem_cgroup_move_charge();
 		mem_cgroup_clear_mc();
+	}
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
 }
 #endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
-	.attach = mem_cgroup_move_task,
+	.post_attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,