tcp: annotate sk->sk_wmem_queued lockless reads
For the sake of tcp_poll(), there are a few places where we fetch
sk->sk_wmem_queued while this field can change from IRQ or other cpu.

We need to add READ_ONCE() annotations, and also make sure write
sides use the corresponding WRITE_ONCE() to avoid store-tearing.

A sk_wmem_queued_add() helper is added so that we can in the future
convert to ADD_ONCE() or an equivalent if/when available.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent e292f05e0d
commit ab4e846a82
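The hunks below only show the converted write sites; the helper itself is
added elsewhere in the patch (include/net/sock.h) and is not part of this
excerpt. As a minimal sketch of its plausible shape, given the commit
message's requirement that writers use WRITE_ONCE():

	/* Sketch of the helper described above: a plain load plus a
	 * WRITE_ONCE() store, so concurrent lockless readers never
	 * observe a torn value. Writers still run under the socket
	 * lock, so the read-modify-write itself needs no atomic op. */
	static inline void sk_wmem_queued_add(struct sock *sk, int val)
	{
		WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
	}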
@@ -659,7 +659,7 @@ static void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tcb->sacked = 0;
 	__skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
-	sk->sk_wmem_queued += skb->truesize;
+	sk_wmem_queued_add(sk, skb->truesize);
 	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
@@ -1034,7 +1034,7 @@ new_segment:
 		skb->len += copy;
 		skb->data_len += copy;
 		skb->truesize += copy;
-		sk->sk_wmem_queued += copy;
+		sk_wmem_queued_add(sk, copy);
 		sk_mem_charge(sk, copy);
 		skb->ip_summed = CHECKSUM_PARTIAL;
 		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
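For completeness, the matching read side the commit message describes for
tcp_poll(): fetches of sk->sk_wmem_queued outside the socket lock become
READ_ONCE() snapshots. An illustrative reader, written in the shape of
sk_stream_wspace() (the exact annotated read sites are in the full patch,
not this excerpt):

	/* Illustrative lockless reader: both fields can change under
	 * us (from IRQ or another CPU), so each is sampled with a
	 * single READ_ONCE() to avoid load-tearing. sk->sk_sndbuf was
	 * annotated by the parent commit e292f05e0d. */
	static inline int sk_stream_wspace(const struct sock *sk)
	{
		return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
	}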