@@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 {
 	struct mem_cgroup_per_node *pn;
 
+	/* Update node */
 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+
 	if (mem_cgroup_disabled())
 		return;
+
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+
+	/* Update memcg */
 	__mod_memcg_state(pn->memcg, idx, val);
+
+	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stat->count[idx], val);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-	if (mem_cgroup_disabled())
-		return;
-	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	mod_memcg_state(pn->memcg, idx, val);
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_state(lruvec, idx, val);
+	preempt_enable();
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
 					   enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
+	pg_data_t *pgdat = page_pgdat(page);
+	struct lruvec *lruvec;
 
-	__mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
+	/* Untracked pages have no memcg, no lruvec. Update only the node */
+	if (!page->mem_cgroup) {
+		__mod_node_page_state(pgdat, idx, val);
 		return;
-	__mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	__this_cpu_add(pn->lruvec_stat->count[idx], val);
+	}
+
+	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	__mod_lruvec_state(lruvec, idx, val);
 }
 
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
-	struct mem_cgroup_per_node *pn;
-
-	mod_node_page_state(page_pgdat(page), idx, val);
-	if (mem_cgroup_disabled() || !page->mem_cgroup)
-		return;
-	mod_memcg_state(page->mem_cgroup, idx, val);
-	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
-	this_cpu_add(pn->lruvec_stat->count[idx], val);
+	preempt_disable();
+	__mod_lruvec_page_state(page, idx, val);
+	preempt_enable();
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,