tcp: annotate sk->sk_wmem_queued lockless reads
For the sake of tcp_poll(), there are a few places where we fetch sk->sk_wmem_queued while this field can change from IRQ context or another CPU.

We need to add READ_ONCE() annotations, and also make sure write sides use the corresponding WRITE_ONCE() to avoid store-tearing.

A sk_wmem_queued_add() helper is added so that we can later convert to ADD_ONCE() or an equivalent, if and when one becomes available.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
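For reference, the helper itself lands in include/net/sock.h in the same commit; that hunk is not part of this excerpt, but per the commit message it is essentially a WRITE_ONCE()-wrapped add, along these lines:

static inline void sk_wmem_queued_add(struct sock *sk, int val)
{
	/* WRITE_ONCE() prevents store-tearing; lockless readers pair
	 * this with READ_ONCE(sk->sk_wmem_queued).
	 */
	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}

Funneling every write through one helper is also what would make a future switch to ADD_ONCE() a one-line change.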
committed by David S. Miller
parent e292f05e0d
commit ab4e846a82
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1199,7 +1199,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
 	__skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk->sk_wmem_queued += skb->truesize;
+	sk_wmem_queued_add(sk, skb->truesize);
 	sk_mem_charge(sk, skb->truesize);
 }
 
@@ -1333,7 +1333,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 		return -ENOMEM; /* We'll just try again later. */
 	skb_copy_decrypted(buff, skb);
 
-	sk->sk_wmem_queued += buff->truesize;
+	sk_wmem_queued_add(sk, buff->truesize);
 	sk_mem_charge(sk, buff->truesize);
 	nlen = skb->len - len - nsize;
 	buff->truesize += nlen;
@@ -1443,7 +1443,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 
 	if (delta_truesize) {
 		skb->truesize -= delta_truesize;
-		sk->sk_wmem_queued -= delta_truesize;
+		sk_wmem_queued_add(sk, -delta_truesize);
 		sk_mem_uncharge(sk, delta_truesize);
 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	}
@@ -1888,7 +1888,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 		return -ENOMEM;
 	skb_copy_decrypted(buff, skb);
 
-	sk->sk_wmem_queued += buff->truesize;
+	sk_wmem_queued_add(sk, buff->truesize);
 	sk_mem_charge(sk, buff->truesize);
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
@@ -2152,7 +2152,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
 		return -1;
-	sk->sk_wmem_queued += nskb->truesize;
+	sk_wmem_queued_add(sk, nskb->truesize);
 	sk_mem_charge(sk, nskb->truesize);
 
 	skb = tcp_send_head(sk);
@@ -3222,7 +3222,7 @@ int tcp_send_synack(struct sock *sk)
 			tcp_rtx_queue_unlink_and_free(skb, sk);
 			__skb_header_release(nskb);
 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
-			sk->sk_wmem_queued += nskb->truesize;
+			sk_wmem_queued_add(sk, nskb->truesize);
 			sk_mem_charge(sk, nskb->truesize);
 			skb = nskb;
 		}
@@ -3447,7 +3447,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
 
 	tcb->end_seq += skb->len;
 	__skb_header_release(skb);
-	sk->sk_wmem_queued += skb->truesize;
+	sk_wmem_queued_add(sk, skb->truesize);
 	sk_mem_charge(sk, skb->truesize);
 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
 	tp->packets_out += tcp_skb_pcount(skb);
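Every hunk above converts a plain read-modify-write into an sk_wmem_queued_add() call. The matching read side (tcp_poll() and the other lockless readers the commit message refers to) is not shown in this excerpt; a minimal sketch of what such a reader must look like, using a hypothetical sk_wmem_room() helper name for illustration:

/* Hypothetical reader, for illustration only: any check performed
 * without the socket lock must load sk_wmem_queued with READ_ONCE()
 * so the compiler can neither tear nor refetch the load while another
 * CPU updates the field through sk_wmem_queued_add().
 */
static inline bool sk_wmem_room(const struct sock *sk, int limit)
{
	return READ_ONCE(sk->sk_wmem_queued) < limit;
}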