net/tls: prevent skb_orphan() from leaking TLS plain text with offload
sk_validate_xmit_skb() and drivers depend on the sk member of
struct sk_buff to identify segments requiring encryption.
Any operation which removes or fails to preserve the original TLS
socket, such as skb_orphan() or skb_clone(), will cause clear text
leaks.
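
For reference, a simplified sketch of what skb_orphan() does today
(recalled from include/linux/skbuff.h, not part of this patch): once the
destructor has run, skb->sk is cleared and the TLS socket can no longer
be reached from the skb.

static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		/* After this point the xmit path can no longer find the
		 * TLS socket's validate hook through skb->sk.
		 */
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}
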
Make the TCP socket underlying an offloaded TLS connection
mark all skbs as decrypted, if TLS TX is in offload mode.
Then, in sk_validate_xmit_skb(), catch skbs which have no socket
(or a socket with no validation) and the decrypted flag set.
Note that CONFIG_SOCK_VALIDATE_XMIT, CONFIG_TLS_DEVICE and
sk->sk_validate_xmit_skb are slightly interchangeable right now,
they all imply TLS offload. The new checks are guarded by
CONFIG_TLS_DEVICE because that's the option guarding the
sk_buff->decrypted member.
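
A sketch of the check described above, assuming the helper keeps its
current shape in include/net/sock.h (simplified; error reporting
omitted):

static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
						   struct net_device *dev)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sock *sk = skb->sk;

	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
		/* Offloaded socket present: let it validate/encrypt. */
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#ifdef CONFIG_TLS_DEVICE
	} else if (unlikely(skb->decrypted)) {
		/* Plain text skb with no socket able to encrypt it:
		 * drop rather than send it out in the clear.
		 */
		kfree_skb(skb);
		skb = NULL;
#endif
	}
#endif
	return skb;
}
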
A second, smaller issue with orphaning is that it breaks
the guarantee that packets will be delivered to device
queues in-order. All TLS offload drivers depend on that
scheduling property. This means skb_orphan_partial()'s
trick of preserving partial socket references will cause
issues in the drivers. We need a full orphan, and as a
result netem delay/throttling will cause all TLS offload
skbs to be dropped.
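
A sketch of the resulting orphaning rule, assuming a
can_skb_orphan_partial() helper as named in the v3 notes below
(sock_wfree/tcp_wfree are the usual TX destructors that make a partial
orphan safe):

static bool can_skb_orphan_partial(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	/* Drivers rely on in-order delivery for crypto offload; a
	 * partial orphan would allow such skbs to be delayed and
	 * reordered, so force a full orphan instead.
	 */
	if (skb->decrypted)
		return false;
#endif
	return (skb->destructor == sock_wfree ||
		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
}

skb_orphan_partial() then falls back to a full skb_orphan() whenever
this returns false.
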
Reusing the sk_buff->decrypted flag also protects from leaking
clear text when an incoming, decrypted skb is redirected (e.g.
by TC).
See commit 0608c69c9a ("bpf: sk_msg, sock{map|hash} redirect
through ULP") for justification why the internal flag is safe.
The only location which could leak the flag in is tcp_bpf_sendmsg(),
which is taken care of by clearing the previously unused bit.
v2:
- remove superfluous decrypted mark copy (Willem);
- remove the stale doc entry (Boris);
- rely entirely on EOR marking to prevent coalescing (Boris);
- use an internal sendpages flag instead of marking the socket
(Boris).
v3 (Willem):
- reorganize the can_skb_orphan_partial() condition;
- fix the flag leak-in through tcp_bpf_sendmsg.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by: David S. Miller
Parent: 0de94de180
Commit: 414776621d
@@ -984,6 +984,9 @@ new_segment:
 		if (!skb)
 			goto wait_for_memory;
 
+#ifdef CONFIG_TLS_DEVICE
+		skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
 		skb_entail(sk, skb);
 		copy = size_goal;
 	}
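
The flag consumed above has to be supplied by the TLS offload TX path.
A minimal sketch of that side, using a hypothetical
tls_offload_push_page() wrapper for illustration (the real call sites
live in net/tls and are not shown in this hunk):

/* Hypothetical wrapper: hand pages to TCP while telling it the payload
 * is clear text that the NIC will encrypt, so the skbs it builds get
 * skb->decrypted set (see the hunk above).
 */
static ssize_t tls_offload_push_page(struct sock *sk, struct page *page,
				     int offset, size_t size, int flags)
{
	return do_tcp_sendpages(sk, page, offset, size,
				flags | MSG_SENDPAGE_DECRYPTED);
}
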
@@ -398,10 +398,14 @@ more_data:
 static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct sk_msg tmp, *msg_tx = NULL;
-	int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
 	int copied = 0, err = 0;
 	struct sk_psock *psock;
 	long timeo;
+	int flags;
+
+	/* Don't let internal do_tcp_sendpages() flags through */
+	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
+	flags |= MSG_NO_SHARED_FRAGS;
 
 	psock = sk_psock_get(sk);
 	if (unlikely(!psock))
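
The remaining hunks propagate the mark whenever TCP carves a new skb
out of already-queued data. skb_copy_decrypted() is presumably a small
helper along these lines (sketch, not shown in the diff):

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	/* Carry the clear-text/offload mark over to the new skb. */
	to->decrypted = from->decrypted;
#endif
}
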
@@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
+	skb_copy_decrypted(buff, skb);
 
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
+	skb_copy_decrypted(buff, skb);
 
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	sk_mem_charge(sk, nskb->truesize);
 
 	skb = tcp_send_head(sk);
+	skb_copy_decrypted(nskb, skb);
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;