Revert "tcp: switch orphan_count to bare per-cpu counters"

This reverts commit a342cb4772 which is
commit 19757cebf0c5016a1f36f7fe9810a9f0b33c0832 upstream.

The commit breaks the ABI, and we don't really need to worry about
Android on systems with more than 256 CPUs at this point in time.

Bug: 161946584
Fixes: a342cb4772 ("tcp: switch orphan_count to bare per-cpu counters")
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ifeb445b4490db185eacc379fbcab2a3ed3b966fb
commit 971945b722
parent e101be336e
Greg Kroah-Hartman, 2021-11-19 14:21:03 +01:00

11 changed files with 38 additions and 49 deletions
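Some context on the trade-off this revert undoes. The reverted scheme replaced each struct percpu_counter orphan counter with a bare per-cpu unsigned int, making the write side a single this_cpu_inc() but leaving no cheap exact read: tcp_orphan_count_sum() below has to walk every possible CPU, so upstream cached its result with a 100 ms deferrable timer. Changing the type of proto->orphan_count, however, changes struct proto itself, which is presumably what trips Android's KMI/ABI checks and motivates this downstream revert. The percpu_counter being restored splits the difference: a shared count plus small per-CPU deltas that are folded in once they exceed a batch. A minimal userspace model of that batched scheme (invented names, single-threaded, for illustration only; the real API lives in include/linux/percpu_counter.h):

	#include <stdio.h>

	#define NR_CPUS    4
	#define PCPU_BATCH 32

	struct toy_percpu_counter {
		long count;           /* shared, approximately up to date */
		long pcpu[NR_CPUS];   /* per-CPU deltas not yet folded in */
	};

	/* Write fast path: touch only this CPU's slot; fold the delta into
	 * the shared count once it exceeds the batch (cf. percpu_counter_add()). */
	static void toy_add(struct toy_percpu_counter *c, int cpu, long n)
	{
		long v = c->pcpu[cpu] + n;

		if (v >= PCPU_BATCH || v <= -PCPU_BATCH) {
			c->count += v;
			c->pcpu[cpu] = 0;
		} else {
			c->pcpu[cpu] = v;
		}
	}

	/* Cheap read: shared count only, may be stale by up to
	 * NR_CPUS * PCPU_BATCH (cf. percpu_counter_read_positive()). */
	static long toy_read(const struct toy_percpu_counter *c)
	{
		return c->count > 0 ? c->count : 0;
	}

	/* Exact read: also walks every CPU slot, the expensive path
	 * (cf. percpu_counter_sum_positive()). */
	static long toy_sum(const struct toy_percpu_counter *c)
	{
		long total = c->count;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			total += c->pcpu[cpu];
		return total > 0 ? total : 0;
	}

	int main(void)
	{
		struct toy_percpu_counter c = { 0 };

		for (int i = 0; i < 100; i++)
			toy_add(&c, i % NR_CPUS, 1);
		/* prints approx=0 exact=100: no delta ever reached the batch */
		printf("approx=%ld exact=%ld\n", toy_read(&c), toy_sum(&c));
		return 0;
	}

Because the cheap read can understate (or overstate) the true total, users of this API must double-check with the exact sum before acting, which is exactly what the restored tcp_too_many_orphans() in include/net/tcp.h does.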

drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c

@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
 		 * created only after 3 way handshake is done.
 		 */
 		sock_orphan(child);
-		INC_ORPHAN_COUNT(child);
+		percpu_counter_inc((child)->sk_prot->orphan_count);
 		chtls_release_resources(child);
 		chtls_conn_done(child);
 	} else {

drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h

@@ -95,7 +95,7 @@ struct deferred_skb_cb {
 #define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
 #define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
 #define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
+#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
 
 /* TLS SKB */
 #define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)

include/net/inet_connection_sock.h

@@ -296,7 +296,7 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
 {
 	/* The below has to be done to allow calling inet_csk_destroy_sock */
 	sock_set_flag(sk, SOCK_DEAD);
-	this_cpu_inc(*sk->sk_prot->orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 }
 
 void inet_csk_destroy_sock(struct sock *sk);

include/net/sock.h

@@ -1235,7 +1235,7 @@ struct proto {
 	unsigned int		useroffset;	/* Usercopy region offset */
 	unsigned int		usersize;	/* Usercopy region size */
 
-	unsigned int __percpu	*orphan_count;
+	struct percpu_counter	*orphan_count;
 
 	struct request_sock_ops	*rsk_prot;
 	struct timewait_sock_ops *twsk_prot;

include/net/tcp.h

@@ -48,9 +48,7 @@
 extern struct inet_hashinfo tcp_hashinfo;
 
-DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
-int tcp_orphan_count_sum(void);
-
+extern struct percpu_counter tcp_orphan_count;
 void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
@@ -292,6 +290,19 @@ static inline bool tcp_out_of_memory(struct sock *sk)
 void sk_forced_mem_schedule(struct sock *sk, int size);
 
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
+{
+	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+	int orphans = percpu_counter_read_positive(ocp);
+
+	if (orphans << shift > sysctl_tcp_max_orphans) {
+		orphans = percpu_counter_sum_positive(ocp);
+		if (orphans << shift > sysctl_tcp_max_orphans)
+			return true;
+	}
+	return false;
+}
+
 bool tcp_check_oom(struct sock *sk, int shift);
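The helper restored above is the canonical two-stage percpu_counter check: start with percpu_counter_read_positive(), which looks only at the shared count and can be off by up to batch * num_cpus, and pay for the exact percpu_counter_sum_positive() walk only when the cheap read already crosses the limit. A false alarm from the cheap read is corrected by the sum; a miss merely delays the verdict until enough deltas are folded in, which is acceptable for an OOM heuristic. A freestanding sketch of that shape (stub counters, invented names):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for percpu_counter_read_positive() and
	 * percpu_counter_sum_positive(); invented for this sketch. */
	static long approx_count(void) { return 900; }   /* stale shared count */
	static long exact_count(void)  { return 1100; }  /* full per-CPU sum */

	/* Generic shape of the restored tcp_too_many_orphans(): trust the
	 * cheap read unless it already crosses the limit, and only then
	 * pay for the exact sum to rule out a false positive. */
	static bool over_limit(long (*approx)(void), long (*exact)(void),
			       int shift, long limit)
	{
		long n = approx();

		if (n << shift > limit) {
			n = exact();
			if (n << shift > limit)
				return true;
		}
		return false;
	}

	int main(void)
	{
		/* With limit 1000: at shift 0 the approx read stays under, so
		 * the exact sum is never taken; at shift 1 it escalates and
		 * the exact sum confirms we are over. */
		printf("shift 0: %d\n", over_limit(approx_count, exact_count, 0, 1000));
		printf("shift 1: %d\n", over_limit(approx_count, exact_count, 1, 1000));
		return 0;
	}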

net/dccp/dccp.h

@@ -48,7 +48,7 @@ extern bool dccp_debug;
 extern struct inet_hashinfo dccp_hashinfo;
 
-DECLARE_PER_CPU(unsigned int, dccp_orphan_count);
+extern struct percpu_counter dccp_orphan_count;
 
 void dccp_time_wait(struct sock *sk, int state, int timeo);

net/dccp/proto.c

@@ -42,8 +42,8 @@ DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 EXPORT_SYMBOL_GPL(dccp_statistics);
 
-DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
-EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);
+struct percpu_counter dccp_orphan_count;
+EXPORT_SYMBOL_GPL(dccp_orphan_count);
 
 struct inet_hashinfo dccp_hashinfo;
 EXPORT_SYMBOL_GPL(dccp_hashinfo);
@@ -1055,7 +1055,7 @@ adjudge_to_death:
 	bh_lock_sock(sk);
 	WARN_ON(sock_owned_by_user(sk));
 
-	this_cpu_inc(dccp_orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
@@ -1115,10 +1115,13 @@ static int __init dccp_init(void)
 	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
 		     sizeof_field(struct sk_buff, cb));
+	rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
+	if (rc)
+		goto out_fail;
 	inet_hashinfo_init(&dccp_hashinfo);
 	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
 	if (rc)
-		goto out_fail;
+		goto out_free_percpu;
 	rc = -ENOBUFS;
 	dccp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("dccp_bind_bucket",
@@ -1223,6 +1226,8 @@ out_free_bind_bucket_cachep:
 	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
 out_free_hashinfo2:
 	inet_hashinfo2_free_mod(&dccp_hashinfo);
+out_free_percpu:
+	percpu_counter_destroy(&dccp_orphan_count);
 out_fail:
 	dccp_hashinfo.bhash = NULL;
 	dccp_hashinfo.ehash = NULL;
@@ -1245,6 +1250,7 @@ static void __exit dccp_fini(void)
 	dccp_ackvec_exit();
 	dccp_sysctl_exit();
 	inet_hashinfo2_free_mod(&dccp_hashinfo);
+	percpu_counter_destroy(&dccp_orphan_count);
 }
 
 module_init(dccp_init);
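The dccp_init() changes above restore the usual kernel goto-ladder for unwinding partial initialization: once percpu_counter_init() has succeeded, any later failure must jump to a label that also destroys the counter, which is why the inet_hashinfo2_init_mod() failure path moves from out_fail to the new out_free_percpu label. A freestanding sketch of the idiom (stub resources, invented names):

	#include <stdio.h>

	/* Invented stand-ins for percpu_counter_init()/_destroy() and
	 * inet_hashinfo2_init_mod(); the second one simulates a failure. */
	static int  counter_init(void)    { return 0; }
	static void counter_destroy(void) { }
	static int  hash_init(void)       { return -1; }

	static int toy_init(void)
	{
		int rc;

		rc = counter_init();
		if (rc)
			goto out_fail;          /* nothing acquired yet */

		rc = hash_init();
		if (rc)
			goto out_free_counter;  /* counter is live: unwind it */

		return 0;

	out_free_counter:
		counter_destroy();
	out_fail:
		return rc;
	}

	int main(void)
	{
		printf("toy_init() = %d\n", toy_init());  /* -1: hash_init failed */
		return 0;
	}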

net/ipv4/inet_connection_sock.c

@@ -892,7 +892,7 @@ void inet_csk_destroy_sock(struct sock *sk)
 	sk_refcnt_debug_release(sk);
 
-	this_cpu_dec(*sk->sk_prot->orphan_count);
+	percpu_counter_dec(sk->sk_prot->orphan_count);
 
 	sock_put(sk);
 }
@@ -951,7 +951,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
 	sock_orphan(child);
 
-	this_cpu_inc(*sk->sk_prot->orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);

net/ipv4/inet_hashtables.c

@@ -598,7 +598,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	if (ok) {
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	} else {
-		this_cpu_inc(*sk->sk_prot->orphan_count);
+		percpu_counter_inc(sk->sk_prot->orphan_count);
 		inet_sk_set_state(sk, TCP_CLOSE);
 		sock_set_flag(sk, SOCK_DEAD);
 		inet_csk_destroy_sock(sk);

net/ipv4/proc.c

@@ -53,7 +53,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 	struct net *net = seq->private;
 	int orphans, sockets;
 
-	orphans = tcp_orphan_count_sum();
+	orphans = percpu_counter_sum_positive(&tcp_orphan_count);
 	sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
 
 	socket_seq_show(seq);

net/ipv4/tcp.c

@@ -282,8 +282,8 @@
 #include <trace/hooks/ipv4.h>
 
-DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
-EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
+struct percpu_counter tcp_orphan_count;
+EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
 long sysctl_tcp_mem[3] __read_mostly;
 EXPORT_SYMBOL(sysctl_tcp_mem);
@@ -2399,36 +2399,11 @@ void tcp_shutdown(struct sock *sk, int how)
 }
 EXPORT_SYMBOL(tcp_shutdown);
 
-int tcp_orphan_count_sum(void)
-{
-	int i, total = 0;
-
-	for_each_possible_cpu(i)
-		total += per_cpu(tcp_orphan_count, i);
-
-	return max(total, 0);
-}
-
-static int tcp_orphan_cache;
-static struct timer_list tcp_orphan_timer;
-
-#define TCP_ORPHAN_TIMER_PERIOD	msecs_to_jiffies(100)
-
-static void tcp_orphan_update(struct timer_list *unused)
-{
-	WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
-	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
-}
-
-static bool tcp_too_many_orphans(int shift)
-{
-	return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
-}
-
 bool tcp_check_oom(struct sock *sk, int shift)
 {
 	bool too_many_orphans, out_of_socket_memory;
 
-	too_many_orphans = tcp_too_many_orphans(shift);
+	too_many_orphans = tcp_too_many_orphans(sk, shift);
 	out_of_socket_memory = tcp_out_of_memory(sk);
 
 	if (too_many_orphans)
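The block removed above was upstream's answer to the missing cheap read: never sum in the fast path at all, but refresh a cached total from a deferrable timer every 100 ms and let tcp_too_many_orphans() compare against the cache. A freestanding sketch of that sample-and-cache shape (invented names, a manual tick standing in for the kernel timer):

	#include <stdio.h>

	#define TOY_NR_CPUS 4

	/* Bare per-CPU counters: writes are a single increment, but an
	 * exact read must visit every CPU (cf. tcp_orphan_count_sum()). */
	static long toy_pcpu[TOY_NR_CPUS];

	static long toy_sum(void)
	{
		long total = 0;

		for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
			total += toy_pcpu[cpu];
		return total > 0 ? total : 0;
	}

	/* What the 100 ms tcp_orphan_timer did: refresh a cached sum so
	 * the hot path only ever reads one integer. */
	static long toy_cache;

	static void toy_timer_tick(void)
	{
		toy_cache = toy_sum();
	}

	static int toy_too_many(int shift, long limit)
	{
		return (toy_cache << shift) > limit;
	}

	int main(void)
	{
		for (int i = 0; i < 100; i++)
			toy_pcpu[i % TOY_NR_CPUS]++;       /* hot-path increments */

		printf("before tick: %d\n", toy_too_many(0, 50)); /* stale: 0 */
		toy_timer_tick();                                  /* periodic refresh */
		printf("after tick:  %d\n", toy_too_many(0, 50)); /* fresh: 1 */
		return 0;
	}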
@@ -2538,7 +2513,7 @@ adjudge_to_death:
 	/* remove backlog if any, without releasing ownership. */
 	__release_sock(sk);
 
-	this_cpu_inc(tcp_orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/* Have we already been destroyed by a softirq or backlog? */
 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -4175,10 +4150,7 @@ void __init tcp_init(void)
 		     sizeof_field(struct sk_buff, cb));
 
 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
-
-	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
-	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
-
+	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
 	inet_hashinfo_init(&tcp_hashinfo);
 	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
 			    thash_entries, 21,  /* one slot per 2 MB*/