@@ -2801,9 +2801,9 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
 	return val;
 }
 
-static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
+static inline unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 {
-	u64 val;
+	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
 		val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
@@ -2816,7 +2816,7 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 		else
 			val = page_counter_read(&memcg->memsw);
 	}
-	return val << PAGE_SHIFT;
+	return val;
 }
 
 enum {
@@ -2850,9 +2850,9 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	switch (MEMFILE_ATTR(cft->private)) {
 	case RES_USAGE:
 		if (counter == &memcg->memory)
-			return mem_cgroup_usage(memcg, false);
+			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
 		if (counter == &memcg->memsw)
-			return mem_cgroup_usage(memcg, true);
+			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
 		return (u64)page_counter_read(counter) * PAGE_SIZE;
 	case RES_LIMIT:
 		return (u64)counter->limit * PAGE_SIZE;
@@ -3352,7 +3352,6 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
-	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
 