FROMGIT: bpf: Support all gso types in bpf_skb_change_proto()

Since we no longer modify gso_size, it is now theoretically
safe to not set SKB_GSO_DODGY and reset gso_segs to zero.

This also means the skb_is_gso_tcp() check should no longer
be necessary.
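
For example, a tc-BPF program doing NAT46-style translation can now
invoke the helper on GSO UDP skbs as well. The following is only an
illustrative sketch, not part of this patch (program, section and
action handling are mine):

  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  /* Sketch only: switch an IPv4 skb to IPv6. With this patch the helper
   * no longer bails out on non-TCP GSO packets.
   */
  SEC("tc")
  int nat46_sketch(struct __sk_buff *skb)
  {
  	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0 /* flags */))
  		return TC_ACT_SHOT;

  	/* ... rewrite the IPv6 header, e.g. via bpf_skb_store_bytes() ... */
  	return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";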

Unfortunately we cannot remove the skb_{decrease,increase}_gso_size()
helpers, as they are still used elsewhere (see the sketch after the
list below):

  bpf_skb_net_grow() without BPF_F_ADJ_ROOM_FIXED_GSO
  bpf_skb_net_shrink() without BPF_F_ADJ_ROOM_FIXED_GSO
  net/core/lwt_bpf.c's handle_gso_type()
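
To illustrate, here is a paraphrased fragment of the kind of logic at
those call sites (not a verbatim copy; the function name is made up):
growing the room for headers without BPF_F_ADJ_ROOM_FIXED_GSO still has
to rescale the MSS and flag the skb for re-validation.

  #include <linux/bpf.h>	/* BPF_F_ADJ_ROOM_FIXED_GSO */
  #include <linux/skbuff.h>

  static void gso_grow_adjust_sketch(struct sk_buff *skb, u32 len_diff,
  				     u64 flags)
  {
  	if (skb_is_gso(skb)) {
  		struct skb_shared_info *shinfo = skb_shinfo(skb);

  		/* Growing the headers shrinks the usable MSS. */
  		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
  			skb_decrease_gso_size(shinfo, len_diff);

  		/* Header must be checked, and gso_segs recomputed. */
  		shinfo->gso_type |= SKB_GSO_DODGY;
  		shinfo->gso_segs = 0;
  	}
  }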

Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dongseok Yi <dseok.yi@samsung.com>
Cc: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/bpf/20210617000953.2787453-3-zenczykowski@gmail.com
(cherry picked from commit 0bc919d3e0b8149a60d2444c6a8e2b5974556522 https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/commit/?id=0bc919d3e0b8149a60d2444c6a8e2b5974556522)
Test: builds, TreeHugger
Bug: 188690383
Change-Id: I46036bbacae9d1af7364ec0623dd75f0df5845fa

@@ -3244,9 +3244,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
-		return -ENOTSUPP;
-
 	ret = skb_cow(skb, len_diff);
 	if (unlikely(ret < 0))
 		return ret;
@@ -3258,17 +3255,11 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-		/* SKB_GSO_TCPV4 needs to be changed into
-		 * SKB_GSO_TCPV6.
-		 */
+		/* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
 		if (shinfo->gso_type & SKB_GSO_TCPV4) {
 			shinfo->gso_type &= ~SKB_GSO_TCPV4;
 			shinfo->gso_type |= SKB_GSO_TCPV6;
 		}
-
-		/* Header must be checked, and gso_segs recomputed. */
-		shinfo->gso_type |= SKB_GSO_DODGY;
-		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IPV6);
@@ -3283,9 +3274,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
-		return -ENOTSUPP;
-
 	ret = skb_unclone(skb, GFP_ATOMIC);
 	if (unlikely(ret < 0))
 		return ret;
@@ -3297,17 +3285,11 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-		/* SKB_GSO_TCPV6 needs to be changed into
-		 * SKB_GSO_TCPV4.
-		 */
+		/* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
 		if (shinfo->gso_type & SKB_GSO_TCPV6) {
 			shinfo->gso_type &= ~SKB_GSO_TCPV6;
 			shinfo->gso_type |= SKB_GSO_TCPV4;
 		}
-
-		/* Header must be checked, and gso_segs recomputed. */
-		shinfo->gso_type |= SKB_GSO_DODGY;
-		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IP);