tcp: annotate tp->copied_seq lockless reads
There are a few places where we fetch tp->copied_seq while this
field can change from IRQ or another cpu.

We need to add READ_ONCE() annotations, and also make sure write
sides use the corresponding WRITE_ONCE() to avoid store-tearing.

Note that tcp_inq_hint() was already using READ_ONCE(tp->copied_seq).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent dba7d9b8c7
commit 7db48e9839
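For readers unfamiliar with the pattern the patch applies, here is a minimal standalone sketch of why the annotations matter: a plain load of a sequence counter that another CPU updates concurrently is a data race, and the compiler is free to tear, fuse or re-load the access; READ_ONCE()/WRITE_ONCE() force a single full-width load or store on each side. The macro definitions below are simplified volatile-cast stand-ins (not the kernel's implementation), and demo_sock, demo_stream_is_readable() and demo_advance_copied_seq() are hypothetical names used only for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE():
 * the volatile access forces the compiler to emit exactly one
 * full-width load/store, preventing load/store tearing and re-reads.
 * (Requires GCC or Clang for typeof.)
 */
#define READ_ONCE(x)       (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

/* Hypothetical stand-in for the two sequence counters involved. */
struct demo_sock {
        uint32_t rcv_nxt;       /* advanced by the receive (softirq) path */
        uint32_t copied_seq;    /* advanced as the application copies data */
};

/*
 * Lockless reader, analogous to tcp_stream_is_readable()/tcp_poll():
 * it may run while another CPU updates copied_seq, so both counters
 * are loaded with READ_ONCE().
 */
static bool demo_stream_is_readable(const struct demo_sock *ds, uint32_t target)
{
        return READ_ONCE(ds->rcv_nxt) - READ_ONCE(ds->copied_seq) >= target;
}

/*
 * Writer, analogous to the recvmsg/read_sock paths: the write side is
 * the sole updater (it runs under the socket lock), so a plain read of
 * its own value is fine; only the store needs the annotation, which
 * pairs with the READ_ONCE() in the lockless reader above.
 */
static void demo_advance_copied_seq(struct demo_sock *ds, uint32_t used)
{
        WRITE_ONCE(ds->copied_seq, ds->copied_seq + used);
}

int main(void)
{
        struct demo_sock ds = { .rcv_nxt = 2000, .copied_seq = 1000 };

        printf("readable(500): %d\n", demo_stream_is_readable(&ds, 500));
        demo_advance_copied_seq(&ds, 800);
        printf("readable(500): %d\n", demo_stream_is_readable(&ds, 500));
        return 0;
}

This mirrors the shape of the patch below: readers outside the socket lock gain READ_ONCE(), while writers keep a plain read of their own field and annotate only the store.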
@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                                           int target, struct sock *sk)
 {
-        return (READ_ONCE(tp->rcv_nxt) - tp->copied_seq >= target) ||
+        return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
                 (sk->sk_prot->stream_memory_read ?
                 sk->sk_prot->stream_memory_read(sk) : false);
 }
@@ -546,7 +546,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
             (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
                 int target = sock_rcvlowat(sk, 0, INT_MAX);
 
-                if (tp->urg_seq == tp->copied_seq &&
+                if (tp->urg_seq == READ_ONCE(tp->copied_seq) &&
                     !sock_flag(sk, SOCK_URGINLINE) &&
                     tp->urg_data)
                         target++;
@@ -607,7 +607,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                 unlock_sock_fast(sk, slow);
                 break;
         case SIOCATMARK:
-                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+                answ = tp->urg_data && tp->urg_seq == READ_ONCE(tp->copied_seq);
                 break;
         case SIOCOUTQ:
                 if (sk->sk_state == TCP_LISTEN)
@@ -1668,9 +1668,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                 sk_eat_skb(sk, skb);
                 if (!desc->count)
                         break;
-                tp->copied_seq = seq;
+                WRITE_ONCE(tp->copied_seq, seq);
         }
-        tp->copied_seq = seq;
+        WRITE_ONCE(tp->copied_seq, seq);
 
         tcp_rcv_space_adjust(sk);
 
@@ -1819,7 +1819,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 out:
         up_read(&current->mm->mmap_sem);
         if (length) {
-                tp->copied_seq = seq;
+                WRITE_ONCE(tp->copied_seq, seq);
                 tcp_rcv_space_adjust(sk);
 
                 /* Clean up data we have read: This will do ACK frames. */
@@ -2117,7 +2117,7 @@ found_ok_skb:
                 if (urg_offset < used) {
                         if (!urg_offset) {
                                 if (!sock_flag(sk, SOCK_URGINLINE)) {
-                                        ++*seq;
+                                        WRITE_ONCE(*seq, *seq + 1);
                                         urg_hole++;
                                         offset++;
                                         used--;
@@ -2139,7 +2139,7 @@ found_ok_skb:
                         }
                 }
 
-                *seq += used;
+                WRITE_ONCE(*seq, *seq + used);
                 copied += used;
                 len -= used;
 
@@ -2166,7 +2166,7 @@ skip_copy:
 
 found_fin_ok:
                 /* Process the FIN. */
-                ++*seq;
+                WRITE_ONCE(*seq, *seq + 1);
                 if (!(flags & MSG_PEEK))
                         sk_eat_skb(sk, skb);
                 break;
@@ -2588,7 +2588,7 @@ int tcp_disconnect(struct sock *sk, int flags)
                 __kfree_skb(sk->sk_rx_skb_cache);
                 sk->sk_rx_skb_cache = NULL;
         }
-        tp->copied_seq = tp->rcv_nxt;
+        WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
         tp->urg_data = 0;
         tcp_write_queue_purge(sk);
         tcp_fastopen_active_disable_ofo_check(sk);