tcp: Fix data-races around sysctl_tcp_reordering.
[ Upstream commit 46778cd16e6a5ad1b2e3a91f6c057c907379418e ]
While reading sysctl_tcp_reordering, it can be changed concurrently.
Thus, we need to add READ_ONCE() to its readers.
Fixes: 1da177e4c3 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
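The pattern being applied, in isolation: the sysctl is a plain int that a writer (the sysctl handler) may update while the TCP fast path reads it without any lock, so each read must be a single, non-torn access. Below is a minimal userspace sketch of that idea; it is not part of the patch, it uses C11 relaxed atomics as a stand-in for the kernel's READ_ONCE()/WRITE_ONCE(), and every name in it (fake_sysctl_tcp_reordering, reader, writer) is illustrative rather than a kernel symbol.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* stands in for net->ipv4.sysctl_tcp_reordering; illustrative default */
static _Atomic int fake_sysctl_tcp_reordering = 3;

static void *writer(void *arg)
{
	(void)arg;
	/* "sysctl write": one atomic store, so readers never see a torn value */
	atomic_store_explicit(&fake_sysctl_tcp_reordering, 300,
			      memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* read once into a local so every later use sees the same snapshot */
	int reordering = atomic_load_explicit(&fake_sysctl_tcp_reordering,
					      memory_order_relaxed);
	printf("reordering snapshot: %d\n", reordering);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Note also the design choice visible in the tcp_enter_loss() hunk below: the sysctl is read once into a local (u8 reordering), and that snapshot is used both for the sacked_out comparison and the min_t() clamp, so a concurrent sysctl write cannot make the two uses disagree.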
committed by Greg Kroah-Hartman
parent dc1a78a2b2
commit 474510e174
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -440,7 +440,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;
 
-	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
 	tcp_assign_congestion_control(sk);
 
 	tp->tsoffset = 0;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2099,6 +2099,7 @@ void tcp_enter_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+	u8 reordering;
 
 	tcp_timeout_mark_lost(sk);
 
@@ -2119,10 +2120,12 @@ void tcp_enter_loss(struct sock *sk)
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
 	 */
+	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+	    tp->sacked_out >= reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       net->ipv4.sysctl_tcp_reordering);
+				       reordering);
+
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -3411,7 +3414,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering >
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
 		return flag & FLAG_FORWARD_PROGRESS;
 
 	return flag & FLAG_DATA_ACKED;
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
 	if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 		val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 		if (val < tp->reordering &&
-		    tp->reordering != net->ipv4.sysctl_tcp_reordering)
+		    tp->reordering !=
+		    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
 			tcp_metric_set(tm, TCP_METRIC_REORDERING,
 				       tp->reordering);
 	}
|