foundations of per-cgroup memory pressure controlling.
This patch replaces all uses of the struct sock fields memory_pressure, memory_allocated, sockets_allocated, and sysctl_mem with accessor macros. Those macros can either receive a socket argument, or a mem_cgroup argument, depending on the context they live in. Since we're only doing a macro wrapping here, no performance impact at all is expected in the case where we have cgroups disabled. Signed-off-by: Glauber Costa <glommer@parallels.com> Reviewed-by: Hiroyouki Kamezawa <kamezawa.hiroyu@jp.fujitsu.com> CC: David S. Miller <davem@davemloft.net> CC: Eric W. Biederman <ebiederm@xmission.com> CC: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:

committad av
David S. Miller

förälder
e5671dfae5
incheckning
180d8cd942
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
|
||||
|
||||
local_bh_disable();
|
||||
orphans = percpu_counter_sum_positive(&tcp_orphan_count);
|
||||
sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
|
||||
sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
|
||||
local_bh_enable();
|
||||
|
||||
socket_seq_show(seq);
|
||||
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
|
||||
sock_prot_inuse_get(net, &tcp_prot), orphans,
|
||||
tcp_death_row.tw_count, sockets,
|
||||
atomic_long_read(&tcp_memory_allocated));
|
||||
proto_memory_allocated(&tcp_prot));
|
||||
seq_printf(seq, "UDP: inuse %d mem %ld\n",
|
||||
sock_prot_inuse_get(net, &udp_prot),
|
||||
atomic_long_read(&udp_memory_allocated));
|
||||
proto_memory_allocated(&udp_prot));
|
||||
seq_printf(seq, "UDPLITE: inuse %d\n",
|
||||
sock_prot_inuse_get(net, &udplite_prot));
|
||||
seq_printf(seq, "RAW: inuse %d\n",
|
||||
|
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
|
||||
/* Check #1 */
|
||||
if (tp->rcv_ssthresh < tp->window_clamp &&
|
||||
(int)tp->rcv_ssthresh < tcp_space(sk) &&
|
||||
!tcp_memory_pressure) {
|
||||
!sk_under_memory_pressure(sk)) {
|
||||
int incr;
|
||||
|
||||
/* Check #2. Increase window, if skb with such overhead
|
||||
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
|
||||
|
||||
if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
|
||||
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
|
||||
!tcp_memory_pressure &&
|
||||
atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
|
||||
!sk_under_memory_pressure(sk) &&
|
||||
sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
|
||||
sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
|
||||
sysctl_tcp_rmem[2]);
|
||||
}
|
||||
@@ -4866,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
|
||||
|
||||
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
|
||||
tcp_clamp_window(sk);
|
||||
else if (tcp_memory_pressure)
|
||||
else if (sk_under_memory_pressure(sk))
|
||||
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
|
||||
|
||||
tcp_collapse_ofo_queue(sk);
|
||||
@@ -4932,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
|
||||
return 0;
|
||||
|
||||
/* If we are under global TCP memory pressure, do not expand. */
|
||||
if (tcp_memory_pressure)
|
||||
if (sk_under_memory_pressure(sk))
|
||||
return 0;
|
||||
|
||||
/* If we are under soft global TCP memory pressure, do not expand. */
|
||||
if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
|
||||
if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
|
||||
return 0;
|
||||
|
||||
/* If we filled the congestion window, do not expand. */
|
||||
|
@@ -1917,7 +1917,7 @@ static int tcp_v4_init_sock(struct sock *sk)
|
||||
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
|
||||
|
||||
local_bh_disable();
|
||||
percpu_counter_inc(&tcp_sockets_allocated);
|
||||
sk_sockets_allocated_inc(sk);
|
||||
local_bh_enable();
|
||||
|
||||
return 0;
|
||||
@@ -1973,7 +1973,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
|
||||
tp->cookie_values = NULL;
|
||||
}
|
||||
|
||||
percpu_counter_dec(&tcp_sockets_allocated);
|
||||
sk_sockets_allocated_dec(sk);
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_v4_destroy_sock);
|
||||
|
||||
|
@@ -1922,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
|
||||
if (free_space < (full_space >> 1)) {
|
||||
icsk->icsk_ack.quick = 0;
|
||||
|
||||
if (tcp_memory_pressure)
|
||||
if (sk_under_memory_pressure(sk))
|
||||
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
|
||||
4U * tp->advmss);
|
||||
|
||||
|
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
|
||||
}
|
||||
|
||||
out:
|
||||
if (tcp_memory_pressure)
|
||||
if (sk_under_memory_pressure(sk))
|
||||
sk_mem_reclaim(sk);
|
||||
out_unlock:
|
||||
bh_unlock_sock(sk);
|
||||
|
Referens i nytt ärende
Block a user