ipv6: kill sk_dst_lock
While testing the np->opt RCU conversion, I found that UDP/IPv6 was using a mixture of xchg() and sk_dst_lock to protect concurrent changes to sk->sk_dst_cache, leading to possible corruptions and crashes.

ip6_sk_dst_lookup_flow() uses sk_dst_check() anyway, so the simplest way to fix the mess is to remove sk_dst_lock completely, as we did for IPv4.

__ip6_dst_store() and ip6_dst_store() share the same implementation.

Since sk_setup_caps() may be called with or without the socket lock held, we have to use sk_dst_set() instead of __sk_dst_set().

Note that the "np->dst_cookie = rt6_get_cookie(rt);" assignment in ip6_dst_store() had to be moved before the sk_setup_caps(sk, dst) call. This is because ip6_dst_store() can be called from process context without any lock held: as soon as the dst is installed in sk->sk_dst_cache, it can be freed by another CPU doing a concurrent ip6_dst_store(). Dereferencing the dst before installing it is needed to make sure no use-after-free can trigger.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6bd4f355df
parent c836a8ba93
committed by David S. Miller
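To make the ordering described in the commit message concrete, here is a minimal userspace C sketch of the pattern, not the kernel code: struct dst, struct sk, sketch_dst_set() and sketch_dst_store() are made-up stand-ins for the kernel's dst_entry, sock, sk_dst_set() and ip6_dst_store(), and C11 atomics stand in for the kernel's xchg()-based pointer swap.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct dst {
        uint32_t cookie;                 /* stands in for rt6_get_cookie(rt) */
};

struct sk {
        _Atomic(struct dst *) dst_cache; /* models sk->sk_dst_cache */
        uint32_t dst_cookie;             /* models np->dst_cookie */
};

/* Lockless replacement in the spirit of sk_dst_set(): publish the new
 * dst with an atomic exchange, then release whatever was there before. */
static void sketch_dst_set(struct sk *sk, struct dst *dst)
{
        struct dst *old = atomic_exchange(&sk->dst_cache, dst);

        free(old);                       /* stands in for dst_release(old) */
}

/* The ordering fix: read what we need from the dst (the route cookie)
 * BEFORE installing it, because once the pointer is visible in dst_cache
 * another thread may swap it out and free it concurrently. */
static void sketch_dst_store(struct sk *sk, struct dst *dst)
{
        sk->dst_cookie = dst->cookie;    /* dereference first ... */
        sketch_dst_set(sk, dst);         /* ... then publish */
}

int main(void)
{
        struct sk sk = { .dst_cache = NULL, .dst_cookie = 0 };
        struct dst *dst = malloc(sizeof(*dst));

        if (!dst)
                return 1;
        dst->cookie = 42;
        sketch_dst_store(&sk, dst);      /* cookie recorded, then dst published */
        sketch_dst_set(&sk, NULL);       /* drop the cached entry */
        return 0;
}

The point is the order inside sketch_dst_store(): the cookie is read from the dst before the atomic publish, because the moment the pointer becomes visible another CPU may exchange it out and free it.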
include/net/sock.h

@@ -254,7 +254,6 @@ struct cg_proto;
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early demux
  *	@sk_dst_cache: destination cache
- *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
@@ -393,7 +392,7 @@ struct sock {
 #endif
 	struct dst_entry	*sk_rx_dst;
 	struct dst_entry __rcu	*sk_dst_cache;
-	spinlock_t		sk_dst_lock;
+	/* Note: 32bit hole on 64bit arches */
 	atomic_t		sk_wmem_alloc;
 	atomic_t		sk_omem_alloc;
 	int			sk_sndbuf;
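The removed lock is not replaced by anything on the reader side: as the commit message notes, ip6_sk_dst_lookup_flow() already goes through sk_dst_check(), which validates the cached route against the saved cookie and falls back to a fresh lookup when it is stale. Below is a small, self-contained sketch of that idea using the same illustrative naming as the sketch above; the plain cookie comparison merely stands in for the kernel's dst->ops->check() / rt6_get_cookie() logic.

#include <stddef.h>
#include <stdint.h>

struct dst {
        uint32_t cookie;        /* changes when the cached route becomes invalid */
};

struct sk {
        struct dst *dst_cache;  /* cached route, read without any sk_dst_lock */
        uint32_t dst_cookie;    /* cookie recorded when the dst was stored */
};

/* Return the cached dst while its cookie still matches; otherwise drop it
 * and return NULL so the caller performs a fresh route lookup. */
struct dst *sketch_dst_check(struct sk *sk)
{
        struct dst *dst = sk->dst_cache;

        if (dst && dst->cookie != sk->dst_cookie) {
                sk->dst_cache = NULL;   /* stale entry: forget it */
                dst = NULL;
        }
        return dst;
}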