net: tcp_memcontrol: simplify linkage between socket and page counter

There won't be any separate counters for socket memory consumed by
protocols other than TCP in the future.  Remove the indirection and link
sockets directly to their owning memory cgroup.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Johannes Weiner
Date:      2016-01-14 15:21:17 -08:00
Committer: Linus Torvalds
Parent:    e805605c72
Commit:    baac50bbc3

10 changed files with 69 additions and 171 deletions
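At the data-structure level, the patch swaps the socket's protocol-specific accounting pointer for a direct memcg pointer. A minimal sketch of the before/after shape of struct sock (illustrative only; unrelated members omitted, comments ours):

/* Before: accounting went through a per-protocol indirection. */
struct sock {
	/* ... */
	struct cg_proto		*sk_cgrp;	/* points into memcg->tcp_mem */
};

/* After: the socket names its owning memory cgroup directly. */
struct sock {
	/* ... */
	struct mem_cgroup	*sk_memcg;	/* charged via mem_cgroup_charge_skmem() */
};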

net/ipv4/tcp_ipv4.c

@@ -1819,7 +1819,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	sk_sockets_allocated_dec(sk);
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 		sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
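For context, sock_release_memcg() lives on the memcg side and drops the css reference the socket took on its owning cgroup at creation time; roughly, under the usual css refcounting API (sketch, not the verbatim function):

/* Release the memcg reference held by a dying socket. */
void sock_release_memcg(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}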
@@ -2343,11 +2343,6 @@ struct proto tcp_prot = {
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
-	.init_cgroup		= tcp_init_cgroup,
-	.destroy_cgroup		= tcp_destroy_cgroup,
-	.proto_cgroup		= tcp_proto_cgroup,
-#endif
 	.diag_destroy		= tcp_abort,
 };
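With the proto_cgroup() hook removed from struct proto, per-memcg TCP state becomes plain member access rather than a NULL-checked lookup. An illustrative before/after of a read path (not a complete function; the pattern recurs throughout tcp_memcontrol.c below):

/* Before: indirect lookup; returns NULL for the root cgroup. */
struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
if (cg_proto)
	val = page_counter_read(&cg_proto->memory_allocated);

/* After: direct member access; callers check for root_mem_cgroup instead. */
if (memcg != root_mem_cgroup)
	val = page_counter_read(&memcg->tcp_mem.memory_allocated);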

net/ipv4/tcp_memcontrol.c

@@ -8,60 +8,47 @@
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct page_counter *counter_parent = NULL;
 	/*
 	 * The root cgroup does not use page_counters, but rather,
 	 * rely on the data already collected by the network
 	 * subsystem
 	 */
-	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct page_counter *counter_parent = NULL;
-	struct cg_proto *cg_proto, *parent_cg;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return 0;
 
-	cg_proto->memory_pressure = 0;
-	cg_proto->memcg = memcg;
+	memcg->tcp_mem.memory_pressure = 0;
 
-	parent_cg = tcp_prot.proto_cgroup(parent);
-	if (parent_cg)
-		counter_parent = &parent_cg->memory_allocated;
+	if (parent)
+		counter_parent = &parent->tcp_mem.memory_allocated;
 
-	page_counter_init(&cg_proto->memory_allocated, counter_parent);
+	page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
 
 	return 0;
 }
 EXPORT_SYMBOL(tcp_init_cgroup);
 
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return;
 
-	if (cg_proto->active)
+	if (memcg->tcp_mem.active)
 		static_key_slow_dec(&memcg_socket_limit_enabled);
-
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-	struct cg_proto *cg_proto;
 	int ret;
 
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return -EINVAL;
 
-	ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
 	if (ret)
 		return ret;
 
-	if (!cg_proto->active) {
+	if (!memcg->tcp_mem.active) {
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
@@ -79,7 +66,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * patched in yet.
 		 */
 		static_key_slow_inc(&memcg_socket_limit_enabled);
-		cg_proto->active = true;
+		memcg->tcp_mem.active = true;
 	}
 
 	return 0;
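The comment above is the subtle part of tcp_update_limit(): the static key gates the accounting call sites, while tcp_mem.active gates whether new sockets get tagged with this memcg at all. Enabling the key first means any socket that later observes active == true will also hit already-patched accounting branches. A compressed sketch of the two sides (reader side paraphrased from the socket-creation path; illustrative only):

/* Writer (limit being set): patch the accounting branches in, then publish. */
static_key_slow_inc(&memcg_socket_limit_enabled);
memcg->tcp_mem.active = true;

/* Reader (socket creation, sketched): a socket is only tagged once
 * 'active' is visible, by which point the key is already patched. */
if (memcg->tcp_mem.active)
	sk->sk_memcg = memcg;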
@@ -123,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
 	u64 val;
 
 	switch (cft->private) {
 	case RES_LIMIT:
-		if (!cg_proto)
-			return PAGE_COUNTER_MAX;
-		val = cg_proto->memory_allocated.limit;
+		if (memcg == root_mem_cgroup)
+			val = PAGE_COUNTER_MAX;
+		else
+			val = memcg->tcp_mem.memory_allocated.limit;
 		val *= PAGE_SIZE;
 		break;
 	case RES_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			val = atomic_long_read(&tcp_memory_allocated);
 		else
-			val = page_counter_read(&cg_proto->memory_allocated);
+			val = page_counter_read(&memcg->tcp_mem.memory_allocated);
 		val *= PAGE_SIZE;
 		break;
 	case RES_FAILCNT:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.failcnt;
+		val = memcg->tcp_mem.memory_allocated.failcnt;
 		break;
 	case RES_MAX_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.watermark;
+		val = memcg->tcp_mem.memory_allocated.watermark;
 		val *= PAGE_SIZE;
 		break;
 	default:
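A note on units in tcp_cgroup_read(): the page_counter operates in pages, while these cgroup control files report bytes, hence the val *= PAGE_SIZE after each counter read. Worked example with hypothetical numbers, assuming 4 KiB pages:

/* A usage of 25 pages surfaces to userspace as 102400 bytes. */
unsigned long pages = 25;		/* e.g. a page_counter_read() result */
u64 bytes = (u64)pages * PAGE_SIZE;	/* 25 * 4096 == 102400 */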
@@ -161,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
 		char *buf, size_t nbytes, loff_t off)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
 
 	memcg = mem_cgroup_from_css(of_css(of));
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return nbytes;
 
 	switch (of_cft(of)->private) {
 	case RES_MAX_USAGE:
-		page_counter_reset_watermark(&cg_proto->memory_allocated);
+		page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
 		break;
 	case RES_FAILCNT:
-		cg_proto->memory_allocated.failcnt = 0;
+		memcg->tcp_mem.memory_allocated.failcnt = 0;
 		break;
 	}

net/ipv4/tcp_output.c

@@ -2821,8 +2821,8 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
 	sk_memory_allocated_add(sk, amt);
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		mem_cgroup_charge_skmem(sk->sk_cgrp, amt);
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
 }
 
 /* Send a FIN. The caller locks the socket for us.
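The uncharge side mirrors the charge above: the patch converts both mem_cgroup_charge_skmem() and mem_cgroup_uncharge_skmem() to take the mem_cgroup directly. A sketch of the paired calls (amt in SK_MEM_QUANTUM page units, as in the hunk above):

/* Charge when memory is scheduled to the socket... */
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
	mem_cgroup_charge_skmem(sk->sk_memcg, amt);

/* ...and release the same amount on the uncharge path. */
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
	mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);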