@@ -676,9 +676,11 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
+mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
-	VM_BUG_ON((unsigned)nid >= nr_node_ids);
+	int nid = zone_to_nid(zone);
+	int zid = zone_idx(zone);
+
 	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
@@ -688,12 +690,12 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 }
 
 static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
+mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 {
 	int nid = page_to_nid(page);
 	int zid = page_zonenum(page);
 
-	return mem_cgroup_zoneinfo(memcg, nid, zid);
+	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
 static struct mem_cgroup_tree_per_zone *
@@ -772,16 +774,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 	unsigned long long excess;
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup_tree_per_zone *mctz;
-	int nid = page_to_nid(page);
-	int zid = page_zonenum(page);
-	mctz = soft_limit_tree_from_page(page);
 
+	mctz = soft_limit_tree_from_page(page);
 	/*
 	 * Necessary to update all ancestors when hierarchy is used.
 	 * because their event counter is not touched.
 	 */
 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+		mz = mem_cgroup_page_zoneinfo(memcg, page);
 		excess = res_counter_soft_limit_excess(&memcg->res);
 		/*
 		 * We have to update the tree if mz is on RB-tree or
@@ -804,14 +804,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 
 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 {
-	int node, zone;
-	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup_tree_per_zone *mctz;
+	struct mem_cgroup_per_zone *mz;
+	int nid, zid;
 
-	for_each_node(node) {
-		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			mz = mem_cgroup_zoneinfo(memcg, node, zone);
-			mctz = soft_limit_tree_node_zone(node, zone);
+	for_each_node(nid) {
+		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+			mctz = soft_limit_tree_node_zone(nid, zid);
 			mem_cgroup_remove_exceeded(memcg, mz, mctz);
 		}
 	}
@@ -946,8 +946,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
 
@@ -955,46 +954,38 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 	return mz->lru_size[lru];
 }
 
-static unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-			unsigned int lru_mask)
-{
-	struct mem_cgroup_per_zone *mz;
-	enum lru_list lru;
-	unsigned long ret = 0;
-
-	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
-	for_each_lru(lru) {
-		if (BIT(lru) & lru_mask)
-			ret += mz->lru_size[lru];
-	}
-	return ret;
-}
-
-static unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-			int nid, unsigned int lru_mask)
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+						  int nid,
+						  unsigned int lru_mask)
 {
-	u64 total = 0;
+	unsigned long nr = 0;
 	int zid;
 
-	for (zid = 0; zid < MAX_NR_ZONES; zid++)
-		total += mem_cgroup_zone_nr_lru_pages(memcg,
-				nid, zid, lru_mask);
+	VM_BUG_ON((unsigned)nid >= nr_node_ids);
+
+	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+		struct mem_cgroup_per_zone *mz;
+		enum lru_list lru;
 
-	return total;
+		for_each_lru(lru) {
+			if (!(BIT(lru) & lru_mask))
+				continue;
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+			nr += mz->lru_size[lru];
+		}
+	}
+	return nr;
 }
 
 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 					     unsigned int lru_mask)
 {
+	unsigned long nr = 0;
 	int nid;
-	u64 total = 0;
 
 	for_each_node_state(nid, N_MEMORY)
-		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
-	return total;
+		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
+	return nr;
 }
 
 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
@@ -1242,11 +1233,9 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 	int uninitialized_var(seq);
 
 	if (reclaim) {
-		int nid = zone_to_nid(reclaim->zone);
-		int zid = zone_idx(reclaim->zone);
 		struct mem_cgroup_per_zone *mz;
 
-		mz = mem_cgroup_zoneinfo(root, nid, zid);
+		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
 		iter = &mz->reclaim_iter[reclaim->priority];
 		if (prev && reclaim->generation != iter->generation) {
 			iter->last_visited = NULL;
@@ -1353,7 +1342,7 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 		goto out;
 	}
 
-	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
+	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -1412,7 +1401,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
-	mz = page_cgroup_zoneinfo(memcg, page);
+	mz = mem_cgroup_page_zoneinfo(memcg, page);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -5305,7 +5294,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
 	for_each_online_node(nid)
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 			rstat = &mz->lruvec.reclaim_stat;
 
 			recent_rotated[0] += rstat->recent_rotated[0];
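
For reviewers skimming the conversions above, here is a minimal userspace sketch of the indexing pattern every caller now reduces to. The struct definitions and the zoneinfo_lookup() helper are simplified stand-ins invented for illustration, not the kernel's real types: mem_cgroup_zone_zoneinfo() and mem_cgroup_page_zoneinfo() differ only in deriving nid/zid from a struct zone or a struct page before doing this same nodeinfo[nid]->zoneinfo[zid] lookup.

#include <stdio.h>

#define MAX_NR_ZONES	4
#define NR_NODES	2

/* Trimmed stand-ins for the kernel structs; only the fields needed
 * to illustrate the nodeinfo[nid]->zoneinfo[zid] indexing. */
struct mem_cgroup_per_zone {
	unsigned long lru_size;
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup {
	struct mem_cgroup_per_node *nodeinfo[NR_NODES];
};

/* Both helpers introduced by the patch boil down to this single
 * expression; the name zoneinfo_lookup is hypothetical. */
static struct mem_cgroup_per_zone *
zoneinfo_lookup(struct mem_cgroup *memcg, int nid, int zid)
{
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

int main(void)
{
	static struct mem_cgroup_per_node node0;
	struct mem_cgroup memcg = { .nodeinfo = { &node0 } };

	node0.zoneinfo[1].lru_size = 42;
	printf("lru_size = %lu\n", zoneinfo_lookup(&memcg, 0, 1)->lru_size);
	return 0;
}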