@@ -8,60 +8,47 @@
 
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct page_counter *counter_parent = NULL;
 	/*
 	 * The root cgroup does not use page_counters, but rather,
 	 * rely on the data already collected by the network
 	 * subsystem
 	 */
-	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct page_counter *counter_parent = NULL;
-	struct cg_proto *cg_proto, *parent_cg;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return 0;
 
-	cg_proto->memory_pressure = 0;
-	cg_proto->memcg = memcg;
+	memcg->tcp_mem.memory_pressure = 0;
 
-	parent_cg = tcp_prot.proto_cgroup(parent);
-	if (parent_cg)
-		counter_parent = &parent_cg->memory_allocated;
+	if (parent)
+		counter_parent = &parent->tcp_mem.memory_allocated;
 
-	page_counter_init(&cg_proto->memory_allocated, counter_parent);
+	page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
 
 	return 0;
 }
-EXPORT_SYMBOL(tcp_init_cgroup);
 
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return;
 
-	if (cg_proto->active)
+	if (memcg->tcp_mem.active)
 		static_key_slow_dec(&memcg_socket_limit_enabled);
-
 }
-EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-	struct cg_proto *cg_proto;
 	int ret;
 
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return -EINVAL;
 
-	ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
 	if (ret)
 		return ret;
 
-	if (!cg_proto->active) {
+	if (!memcg->tcp_mem.active) {
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
@@ -79,7 +66,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * patched in yet.
 		 */
 		static_key_slow_inc(&memcg_socket_limit_enabled);
-		cg_proto->active = true;
+		memcg->tcp_mem.active = true;
 	}
 
 	return 0;
@@ -123,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
 	u64 val;
 
 	switch (cft->private) {
 	case RES_LIMIT:
-		if (!cg_proto)
-			return PAGE_COUNTER_MAX;
-		val = cg_proto->memory_allocated.limit;
+		if (memcg == root_mem_cgroup)
+			val = PAGE_COUNTER_MAX;
+		else
+			val = memcg->tcp_mem.memory_allocated.limit;
 		val *= PAGE_SIZE;
 		break;
 	case RES_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			val = atomic_long_read(&tcp_memory_allocated);
 		else
-			val = page_counter_read(&cg_proto->memory_allocated);
+			val = page_counter_read(&memcg->tcp_mem.memory_allocated);
 		val *= PAGE_SIZE;
 		break;
 	case RES_FAILCNT:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.failcnt;
+		val = memcg->tcp_mem.memory_allocated.failcnt;
 		break;
 	case RES_MAX_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.watermark;
+		val = memcg->tcp_mem.memory_allocated.watermark;
 		val *= PAGE_SIZE;
 		break;
 	default:
@@ -161,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
 
 	memcg = mem_cgroup_from_css(of_css(of));
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return nbytes;
 
 	switch (of_cft(of)->private) {
 	case RES_MAX_USAGE:
-		page_counter_reset_watermark(&cg_proto->memory_allocated);
+		page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
 		break;
 	case RES_FAILCNT:
-		cg_proto->memory_allocated.failcnt = 0;
+		memcg->tcp_mem.memory_allocated.failcnt = 0;
 		break;
 	}
 
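
For orientation, the new memcg->tcp_mem.* accesses above assume that the per-memcg
TCP accounting state now sits directly inside struct mem_cgroup rather than being
looked up through tcp_prot.proto_cgroup(). A minimal sketch of that assumed layout
follows, showing only the fields this diff actually touches; the real definitions
carry more members, and the name of the embedded type is an assumption here:

	/*
	 * Sketch only: fields inferred from the accesses in the hunks above,
	 * not the authoritative definitions from the tree this patch targets.
	 */
	struct cg_proto {
		struct page_counter	memory_allocated;	/* socket pages charged to this group */
		int			memory_pressure;	/* pressure flag, cleared at init */
		bool			active;			/* set once a limit has been written */
	};

	struct mem_cgroup {
		/* ... other memcg state ... */
		struct cg_proto		tcp_mem;		/* accessed as memcg->tcp_mem.* above */
	};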