Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix endless loop in nf_tables, from Phil Sutter.

 2) Fix cross namespace ip6_gre tunnel hash list corruption, from
    Olivier Matz.

 3) Don't be too strict in phy_start_aneg() otherwise we might not
    allow restarting auto negotiation. From Heiner Kallweit.

 4) Fix various KMSAN uninitialized value cases in tipc, from Ying Xue.

 5) Memory leak in act_tunnel_key, from Davide Caratti.

 6) Handle chip errata of mv88e6390 PHY, from Andrew Lunn.

 7) Remove linear SKB assumption in fou/fou6, from Eric Dumazet.

 8) Missing udplite rehash callbacks, from Alexey Kodanev.

 9) Log dirty pages properly in vhost, from Jason Wang.

10) Use consume_skb() in neigh_probe() as this is a normal free not a
    drop, from Yang Wei. Likewise in macvlan_process_broadcast().

11) Missing device_del() in mdiobus_register() error paths, from
    Thomas Petazzoni.

12) Fix checksum handling of short packets in mlx5, from Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (96 commits)
  bpf: in __bpf_redirect_no_mac pull mac only if present
  virtio_net: bulk free tx skbs
  net: phy: phy driver features are mandatory
  isdn: avm: Fix string plus integer warning from Clang
  net/mlx5e: Fix cb_ident duplicate in indirect block register
  net/mlx5e: Fix wrong (zero) TX drop counter indication for representor
  net/mlx5e: Fix wrong error code return on FEC query failure
  net/mlx5e: Force CHECKSUM_UNNECESSARY for short ethernet frames
  tools: bpftool: Cleanup license mess
  bpf: fix inner map masking to prevent oob under speculation
  bpf: pull in pkt_sched.h header for tooling to fix bpftool build
  selftests: forwarding: Add a test case for externally learned FDB entries
  selftests: mlxsw: Test FDB offload indication
  mlxsw: spectrum_switchdev: Do not treat static FDB entries as sticky
  net: bridge: Mark FDB entries that were added by user as such
  mlxsw: spectrum_fid: Update dummy FID index
  mlxsw: pci: Return error on PCI reset timeout
  mlxsw: pci: Increase PCI SW reset timeout
  mlxsw: pci: Ring CQ's doorbell before RDQ's
  MAINTAINERS: update email addresses of liquidio driver maintainers
  ...
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
.section .bpfilter_umh, "a"
.section .rodata, "a"
.global bpfilter_umh_start
bpfilter_umh_start:
.incbin "net/bpfilter/bpfilter_umh"
@@ -1128,6 +1128,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
err = -ENOMEM;
goto err_unlock;
}
if (swdev_notify)
fdb->added_by_user = 1;
fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
@@ -1147,6 +1149,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}

if (swdev_notify)
fdb->added_by_user = 1;

if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,

int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;

skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);

if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
net = dev_net(indev);
} else {
if (unlikely(netpoll_tx_running(to->br->dev))) {
if (!is_skb_forwardable(skb->dev, skb)) {
skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
else
br_netpoll_send_skb(to, skb);
}
return;
}
br_hook = NF_BR_LOCAL_OUT;
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
IPSTATS_MIB_INDISCARDS);
goto drop;
}
hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
goto drop;
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
tmp.name[sizeof(tmp.name) - 1] = 0;

countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
newinfo = vmalloc(sizeof(*newinfo) + countersize);
newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
PAGE_KERNEL);
if (!newinfo)
return -ENOMEM;

if (countersize)
memset(newinfo->counters, 0, countersize);

newinfo->entries = vmalloc(tmp.entries_size);
newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
PAGE_KERNEL);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;

ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
/* skb->mac_len is not set on normal egress */
unsigned int mlen = skb->network_header - skb->mac_header;
unsigned int mlen = skb_network_offset(skb);

__skb_pull(skb, mlen);
if (mlen) {
__skb_pull(skb, mlen);

/* At ingress, the mac header has already been pulled once.
* At egress, skb_pospull_rcsum has to be done in case that
* the skb is originated from ingress (i.e. a forwarded skb)
* to ensure that rcsum starts at net header.
*/
if (!skb_at_tc_ingress(skb))
skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
/* At ingress, the mac header has already been pulled once.
* At egress, skb_pospull_rcsum has to be done in case that
* the skb is originated from ingress (i.e. a forwarded skb)
* to ensure that rcsum starts at net header.
*/
if (!skb_at_tc_ingress(skb))
skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
}
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
case SO_MAX_PACING_RATE: /* 32bit version */
if (val != ~0U)
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
sk->sk_mark = val;
if (sk->sk_mark != val) {
sk->sk_mark = val;
sk_dst_reset(sk);
}
break;
default:
ret = -EINVAL;
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
/* else: fall through */
/* else, fall through */
default:
return NULL;
}
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
lwt->name ? : "<unknown>");
ret = BPF_OK;
} else {
skb_reset_mac_header(skb);
ret = skb_do_redirect(skb);
if (ret == 0)
ret = BPF_REDIRECT;
@@ -450,7 +450,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
kmemleak_alloc(buckets, size, 0, GFP_ATOMIC);
kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
}
if (!buckets) {
kfree(ret);
@@ -1007,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh)
if (neigh->ops->solicit)
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
|
||||
struct fib_table *tb;
|
||||
|
||||
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
|
||||
flushed += fib_table_flush(net, tb);
|
||||
flushed += fib_table_flush(net, tb, false);
|
||||
}
|
||||
|
||||
if (flushed)
|
||||
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
|
||||
|
||||
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
|
||||
hlist_del(&tb->tb_hlist);
|
||||
fib_table_flush(net, tb);
|
||||
fib_table_flush(net, tb, true);
|
||||
fib_free_table(tb);
|
||||
}
|
||||
}
|
||||
|
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
|
||||
}
|
||||
|
||||
/* Caller must hold RTNL. */
|
||||
int fib_table_flush(struct net *net, struct fib_table *tb)
|
||||
int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
|
||||
{
|
||||
struct trie *t = (struct trie *)tb->tb_data;
|
||||
struct key_vector *pn = t->kv;
|
||||
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
|
||||
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
|
||||
struct fib_info *fi = fa->fa_info;
|
||||
|
||||
if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
|
||||
tb->tb_id != fa->tb_id) {
|
||||
if (!fi || tb->tb_id != fa->tb_id ||
|
||||
(!(fi->fib_flags & RTNH_F_DEAD) &&
|
||||
!fib_props[fa->fa_type].error)) {
|
||||
slen = fa->fa_slen;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Do not flush error routes if network namespace is
|
||||
* not being dismantled
|
||||
*/
|
||||
if (!flush_all && fib_props[fa->fa_type].error) {
|
||||
slen = fa->fa_slen;
|
||||
continue;
|
||||
}
|
||||
|
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
|
||||
{
|
||||
int transport_offset = skb_transport_offset(skb);
|
||||
struct guehdr *guehdr;
|
||||
size_t optlen;
|
||||
size_t len, optlen;
|
||||
int ret;
|
||||
|
||||
if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
|
||||
len = sizeof(struct udphdr) + sizeof(struct guehdr);
|
||||
if (!pskb_may_pull(skb, len))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
|
||||
|
||||
optlen = guehdr->hlen << 2;
|
||||
|
||||
if (!pskb_may_pull(skb, len + optlen))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
if (validate_gue_flags(guehdr, optlen))
|
||||
return -EINVAL;
|
||||
|
||||
|
@@ -569,8 +569,7 @@ err_free_skb:
|
||||
dev->stats.tx_dropped++;
|
||||
}
|
||||
|
||||
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
__be16 proto)
|
||||
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||
struct ip_tunnel_info *tun_info;
|
||||
@@ -578,10 +577,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
struct erspan_metadata *md;
|
||||
struct rtable *rt = NULL;
|
||||
bool truncate = false;
|
||||
__be16 df, proto;
|
||||
struct flowi4 fl;
|
||||
int tunnel_hlen;
|
||||
int version;
|
||||
__be16 df;
|
||||
int nhoff;
|
||||
int thoff;
|
||||
|
||||
@@ -626,18 +625,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
|
||||
if (version == 1) {
|
||||
erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
|
||||
ntohl(md->u.index), truncate, true);
|
||||
proto = htons(ETH_P_ERSPAN);
|
||||
} else if (version == 2) {
|
||||
erspan_build_header_v2(skb,
|
||||
ntohl(tunnel_id_to_key32(key->tun_id)),
|
||||
md->u.md2.dir,
|
||||
get_hwid(&md->u.md2),
|
||||
truncate, true);
|
||||
proto = htons(ETH_P_ERSPAN2);
|
||||
} else {
|
||||
goto err_free_rt;
|
||||
}
|
||||
|
||||
gre_build_header(skb, 8, TUNNEL_SEQ,
|
||||
htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
|
||||
proto, 0, htonl(tunnel->o_seqno++));
|
||||
|
||||
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
|
||||
|
||||
@@ -721,12 +722,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
|
||||
{
|
||||
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||
bool truncate = false;
|
||||
__be16 proto;
|
||||
|
||||
if (!pskb_inet_may_pull(skb))
|
||||
goto free_skb;
|
||||
|
||||
if (tunnel->collect_md) {
|
||||
erspan_fb_xmit(skb, dev, skb->protocol);
|
||||
erspan_fb_xmit(skb, dev);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
@@ -742,19 +744,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
|
||||
}
|
||||
|
||||
/* Push ERSPAN header */
|
||||
if (tunnel->erspan_ver == 1)
|
||||
if (tunnel->erspan_ver == 1) {
|
||||
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
|
||||
tunnel->index,
|
||||
truncate, true);
|
||||
else if (tunnel->erspan_ver == 2)
|
||||
proto = htons(ETH_P_ERSPAN);
|
||||
} else if (tunnel->erspan_ver == 2) {
|
||||
erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
|
||||
tunnel->dir, tunnel->hwid,
|
||||
truncate, true);
|
||||
else
|
||||
proto = htons(ETH_P_ERSPAN2);
|
||||
} else {
|
||||
goto free_skb;
|
||||
}
|
||||
|
||||
tunnel->parms.o_flags &= ~TUNNEL_KEY;
|
||||
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
|
||||
__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
free_skb:
|
||||
|
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
|
||||
goto drop;
|
||||
}
|
||||
|
||||
iph = ip_hdr(skb);
|
||||
skb->transport_header = skb->network_header + iph->ihl*4;
|
||||
|
||||
/* Remove any debris in the socket control block */
|
||||
|
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
|
||||
flags = msg->msg_flags;
|
||||
|
||||
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
|
||||
if (sk->sk_state != TCP_ESTABLISHED) {
|
||||
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
|
||||
err = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
|
@@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
|
||||
const int hlen = skb_network_header_len(skb) +
|
||||
sizeof(struct udphdr);
|
||||
|
||||
if (hlen + cork->gso_size > cork->fragsize)
|
||||
if (hlen + cork->gso_size > cork->fragsize) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
|
||||
}
|
||||
if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
if (sk->sk_no_check_tx)
|
||||
}
|
||||
if (sk->sk_no_check_tx) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
|
||||
dst_xfrm(skb_dst(skb)))
|
||||
dst_xfrm(skb_dst(skb))) {
|
||||
kfree_skb(skb);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
skb_shinfo(skb)->gso_size = cork->gso_size;
|
||||
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
|
||||
@@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
|
||||
}
|
||||
EXPORT_SYMBOL(udp_lib_rehash);
|
||||
|
||||
static void udp_v4_rehash(struct sock *sk)
|
||||
void udp_v4_rehash(struct sock *sk)
|
||||
{
|
||||
u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
|
||||
inet_sk(sk)->inet_rcv_saddr,
|
||||
|
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
|
||||
int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
|
||||
|
||||
int udp_v4_get_port(struct sock *sk, unsigned short snum);
|
||||
void udp_v4_rehash(struct sock *sk);
|
||||
|
||||
int udp_setsockopt(struct sock *sk, int level, int optname,
|
||||
char __user *optval, unsigned int optlen);
|
||||
|
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
|
||||
.sendpage = udp_sendpage,
|
||||
.hash = udp_lib_hash,
|
||||
.unhash = udp_lib_unhash,
|
||||
.rehash = udp_v4_rehash,
|
||||
.get_port = udp_v4_get_port,
|
||||
.memory_allocated = &udp_memory_allocated,
|
||||
.sysctl_mem = sysctl_udp_mem,
|
||||
|
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
{
|
||||
int transport_offset = skb_transport_offset(skb);
|
||||
struct guehdr *guehdr;
|
||||
size_t optlen;
|
||||
size_t len, optlen;
|
||||
int ret;
|
||||
|
||||
if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
|
||||
len = sizeof(struct udphdr) + sizeof(struct guehdr);
|
||||
if (!pskb_may_pull(skb, len))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
@@ -128,6 +129,10 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
||||
optlen = guehdr->hlen << 2;
|
||||
|
||||
if (!pskb_may_pull(skb, len + optlen))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
if (validate_gue_flags(guehdr, optlen))
|
||||
return -EINVAL;
|
||||
|
||||
|
@@ -922,6 +922,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
||||
__u8 dsfield = false;
|
||||
struct flowi6 fl6;
|
||||
int err = -EINVAL;
|
||||
__be16 proto;
|
||||
__u32 mtu;
|
||||
int nhoff;
|
||||
int thoff;
|
||||
@@ -1035,8 +1036,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
|
||||
}
|
||||
|
||||
/* Push GRE header. */
|
||||
gre_build_header(skb, 8, TUNNEL_SEQ,
|
||||
htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
|
||||
proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
|
||||
: htons(ETH_P_ERSPAN2);
|
||||
gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
|
||||
|
||||
/* TooBig packet may have updated dst->dev's mtu */
|
||||
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
|
||||
@@ -1169,6 +1171,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
|
||||
t->parms.i_flags = p->i_flags;
|
||||
t->parms.o_flags = p->o_flags;
|
||||
t->parms.fwmark = p->fwmark;
|
||||
t->parms.erspan_ver = p->erspan_ver;
|
||||
t->parms.index = p->index;
|
||||
t->parms.dir = p->dir;
|
||||
t->parms.hwid = p->hwid;
|
||||
dst_cache_reset(&t->dst_cache);
|
||||
}
|
||||
|
||||
@@ -2025,9 +2031,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
|
||||
struct nlattr *data[],
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
|
||||
struct ip6_tnl *t = netdev_priv(dev);
|
||||
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
|
||||
struct __ip6_tnl_parm p;
|
||||
struct ip6_tnl *t;
|
||||
|
||||
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
|
||||
if (IS_ERR(t))
|
||||
|
@@ -4251,17 +4251,6 @@ struct rt6_nh {
|
||||
struct list_head next;
|
||||
};
|
||||
|
||||
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
|
||||
{
|
||||
struct rt6_nh *nh;
|
||||
|
||||
list_for_each_entry(nh, rt6_nh_list, next) {
|
||||
pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
|
||||
&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
|
||||
nh->r_cfg.fc_ifindex);
|
||||
}
|
||||
}
|
||||
|
||||
static int ip6_route_info_append(struct net *net,
|
||||
struct list_head *rt6_nh_list,
|
||||
struct fib6_info *rt,
|
||||
@@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
|
||||
nh->fib6_info = NULL;
|
||||
if (err) {
|
||||
if (replace && nhn)
|
||||
ip6_print_replace_route_err(&rt6_nh_list);
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"multipath route replace failed (check consistency of installed routes)");
|
||||
err_nh = nh;
|
||||
goto add_errout;
|
||||
}
|
||||
|
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
|
||||
return udp_lib_get_port(sk, snum, hash2_nulladdr);
|
||||
}
|
||||
|
||||
static void udp_v6_rehash(struct sock *sk)
|
||||
void udp_v6_rehash(struct sock *sk)
|
||||
{
|
||||
u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
|
||||
&sk->sk_v6_rcv_saddr,
|
||||
@@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
|
||||
const int hlen = skb_network_header_len(skb) +
|
||||
sizeof(struct udphdr);
|
||||
|
||||
if (hlen + cork->gso_size > cork->fragsize)
|
||||
if (hlen + cork->gso_size > cork->fragsize) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
|
||||
}
|
||||
if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
if (udp_sk(sk)->no_check6_tx)
|
||||
}
|
||||
if (udp_sk(sk)->no_check6_tx) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
|
||||
dst_xfrm(skb_dst(skb)))
|
||||
dst_xfrm(skb_dst(skb))) {
|
||||
kfree_skb(skb);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
skb_shinfo(skb)->gso_size = cork->gso_size;
|
||||
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
|
||||
|
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
|
||||
__be32, struct udp_table *);
|
||||
|
||||
int udp_v6_get_port(struct sock *sk, unsigned short snum);
|
||||
void udp_v6_rehash(struct sock *sk);
|
||||
|
||||
int udpv6_getsockopt(struct sock *sk, int level, int optname,
|
||||
char __user *optval, int __user *optlen);
|
||||
|
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
|
||||
.recvmsg = udpv6_recvmsg,
|
||||
.hash = udp_lib_hash,
|
||||
.unhash = udp_lib_unhash,
|
||||
.rehash = udp_v6_rehash,
|
||||
.get_port = udp_v6_get_port,
|
||||
.memory_allocated = &udp_memory_allocated,
|
||||
.sysctl_mem = sysctl_udp_mem,
|
||||
|
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
|
||||
{
|
||||
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
|
||||
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
|
||||
struct dst_entry *other_dst = route->tuple[!dir].dst;
|
||||
struct dst_entry *dst = route->tuple[dir].dst;
|
||||
|
||||
ft->dir = dir;
|
||||
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
|
||||
ft->src_port = ctt->src.u.tcp.port;
|
||||
ft->dst_port = ctt->dst.u.tcp.port;
|
||||
|
||||
ft->iifidx = route->tuple[dir].ifindex;
|
||||
ft->oifidx = route->tuple[!dir].ifindex;
|
||||
ft->iifidx = other_dst->dev->ifindex;
|
||||
ft->oifidx = dst->dev->ifindex;
|
||||
ft->dst_cache = dst;
|
||||
}
|
||||
|
||||
|
@@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
|
||||
struct net *net = sock_net(skb->sk);
|
||||
unsigned int s_idx = cb->args[0];
|
||||
const struct nft_rule *rule;
|
||||
int rc = 1;
|
||||
|
||||
list_for_each_entry_rcu(rule, &chain->rules, list) {
|
||||
if (!nft_is_active(net, rule))
|
||||
@@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
|
||||
NLM_F_MULTI | NLM_F_APPEND,
|
||||
table->family,
|
||||
table, chain, rule) < 0)
|
||||
goto out_unfinished;
|
||||
return 1;
|
||||
|
||||
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
|
||||
cont:
|
||||
(*idx)++;
|
||||
}
|
||||
rc = 0;
|
||||
out_unfinished:
|
||||
cb->args[0] = *idx;
|
||||
return rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nf_tables_dump_rules(struct sk_buff *skb,
|
||||
@@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
|
||||
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
|
||||
continue;
|
||||
|
||||
if (ctx && ctx->chain) {
|
||||
if (ctx && ctx->table && ctx->chain) {
|
||||
struct rhlist_head *list, *tmp;
|
||||
|
||||
list = rhltable_lookup(&table->chains_ht, ctx->chain,
|
||||
@@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
|
||||
}
|
||||
done:
|
||||
rcu_read_unlock();
|
||||
|
||||
cb->args[0] = idx;
|
||||
return skb->len;
|
||||
}
|
||||
|
||||
@@ -4508,6 +4506,8 @@ err6:
|
||||
err5:
|
||||
kfree(trans);
|
||||
err4:
|
||||
if (obj)
|
||||
obj->use--;
|
||||
kfree(elem.priv);
|
||||
err3:
|
||||
if (nla[NFTA_SET_ELEM_DATA] != NULL)
|
||||
|
@@ -12,6 +12,7 @@
|
||||
#include <net/netfilter/nf_conntrack_core.h>
|
||||
#include <linux/netfilter/nf_conntrack_common.h>
|
||||
#include <net/netfilter/nf_flow_table.h>
|
||||
#include <net/netfilter/nf_conntrack_helper.h>
|
||||
|
||||
struct nft_flow_offload {
|
||||
struct nft_flowtable *flowtable;
|
||||
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
|
||||
memset(&fl, 0, sizeof(fl));
|
||||
switch (nft_pf(pkt)) {
|
||||
case NFPROTO_IPV4:
|
||||
fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
|
||||
fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
|
||||
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
|
||||
break;
|
||||
case NFPROTO_IPV6:
|
||||
fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
|
||||
fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
|
||||
fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
|
||||
return -ENOENT;
|
||||
|
||||
route->tuple[dir].dst = this_dst;
|
||||
route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
|
||||
route->tuple[!dir].dst = other_dst;
|
||||
route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
||||
{
|
||||
struct nft_flow_offload *priv = nft_expr_priv(expr);
|
||||
struct nf_flowtable *flowtable = &priv->flowtable->data;
|
||||
const struct nf_conn_help *help;
|
||||
enum ip_conntrack_info ctinfo;
|
||||
struct nf_flow_route route;
|
||||
struct flow_offload *flow;
|
||||
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (test_bit(IPS_HELPER_BIT, &ct->status))
|
||||
help = nfct_help(ct);
|
||||
if (help)
|
||||
goto out;
|
||||
|
||||
if (ctinfo == IP_CT_NEW ||
|
||||
|
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
|
||||
if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
|
||||
attrs |= 1 << type;
|
||||
a[type] = nla;
|
||||
}
|
||||
|
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
|
||||
goto out_free;
|
||||
} else if (reserve) {
|
||||
skb_reserve(skb, -reserve);
|
||||
if (len < reserve)
|
||||
if (len < reserve + sizeof(struct ipv6hdr) &&
|
||||
dev->min_header_len != dev->hard_header_len)
|
||||
skb_reset_network_header(skb);
|
||||
}
|
||||
|
||||
|
@@ -418,76 +418,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
|
||||
}
|
||||
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_check_call - Check a call's state
|
||||
* @sock: The socket the call is on
|
||||
* @call: The call to check
|
||||
* @_compl: Where to store the completion state
|
||||
* @_abort_code: Where to store any abort code
|
||||
*
|
||||
* Allow a kernel service to query the state of a call and find out the manner
|
||||
* of its termination if it has completed. Returns -EINPROGRESS if the call is
|
||||
* still going, 0 if the call finished successfully, -ECONNABORTED if the call
|
||||
* was aborted and an appropriate error if the call failed in some other way.
|
||||
*/
|
||||
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
|
||||
enum rxrpc_call_completion *_compl, u32 *_abort_code)
|
||||
{
|
||||
if (call->state != RXRPC_CALL_COMPLETE)
|
||||
return -EINPROGRESS;
|
||||
smp_rmb();
|
||||
*_compl = call->completion;
|
||||
*_abort_code = call->abort_code;
|
||||
return call->error;
|
||||
}
|
||||
EXPORT_SYMBOL(rxrpc_kernel_check_call);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_retry_call - Allow a kernel service to retry a call
|
||||
* @sock: The socket the call is on
|
||||
* @call: The call to retry
|
||||
* @srx: The address of the peer to contact
|
||||
* @key: The security context to use (defaults to socket setting)
|
||||
*
|
||||
* Allow a kernel service to try resending a client call that failed due to a
|
||||
* network error to a new address. The Tx queue is maintained intact, thereby
|
||||
* relieving the need to re-encrypt any request data that has already been
|
||||
* buffered.
|
||||
*/
|
||||
int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
|
||||
struct sockaddr_rxrpc *srx, struct key *key)
|
||||
{
|
||||
struct rxrpc_conn_parameters cp;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
int ret;
|
||||
|
||||
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
|
||||
|
||||
if (!key)
|
||||
key = rx->key;
|
||||
if (key && !key->payload.data[0])
|
||||
key = NULL; /* a no-security key */
|
||||
|
||||
memset(&cp, 0, sizeof(cp));
|
||||
cp.local = rx->local;
|
||||
cp.key = key;
|
||||
cp.security_level = 0;
|
||||
cp.exclusive = false;
|
||||
cp.service_id = srx->srx_service;
|
||||
|
||||
mutex_lock(&call->user_mutex);
|
||||
|
||||
ret = rxrpc_prepare_call_for_retry(rx, call);
|
||||
if (ret == 0)
|
||||
ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
|
||||
|
||||
mutex_unlock(&call->user_mutex);
|
||||
rxrpc_put_peer(cp.peer);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(rxrpc_kernel_retry_call);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_new_call_notification - Get notifications of new calls
|
||||
* @sock: The socket to intercept received messages on
|
||||
|
@@ -476,7 +476,6 @@ enum rxrpc_call_flag {
|
||||
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
|
||||
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
|
||||
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
|
||||
RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
|
||||
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
|
||||
RXRPC_CALL_PINGING, /* Ping in process */
|
||||
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
|
||||
@@ -517,6 +516,18 @@ enum rxrpc_call_state {
|
||||
NR__RXRPC_CALL_STATES
|
||||
};
|
||||
|
||||
/*
|
||||
* Call completion condition (state == RXRPC_CALL_COMPLETE).
|
||||
*/
|
||||
enum rxrpc_call_completion {
|
||||
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
|
||||
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
|
||||
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
|
||||
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
|
||||
RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
|
||||
NR__RXRPC_CALL_COMPLETIONS
|
||||
};
|
||||
|
||||
/*
|
||||
* Call Tx congestion management modes.
|
||||
*/
|
||||
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
|
||||
struct sockaddr_rxrpc *,
|
||||
struct rxrpc_call_params *, gfp_t,
|
||||
unsigned int);
|
||||
int rxrpc_retry_client_call(struct rxrpc_sock *,
|
||||
struct rxrpc_call *,
|
||||
struct rxrpc_conn_parameters *,
|
||||
struct sockaddr_rxrpc *,
|
||||
gfp_t);
|
||||
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
|
||||
struct sk_buff *);
|
||||
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
|
||||
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
|
||||
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
|
||||
bool __rxrpc_queue_call(struct rxrpc_call *);
|
||||
bool rxrpc_queue_call(struct rxrpc_call *);
|
||||
|
@@ -324,48 +324,6 @@ error:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Retry a call to a new address. It is expected that the Tx queue of the call
|
||||
* will contain data previously packaged for an old call.
|
||||
*/
|
||||
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
|
||||
struct rxrpc_call *call,
|
||||
struct rxrpc_conn_parameters *cp,
|
||||
struct sockaddr_rxrpc *srx,
|
||||
gfp_t gfp)
|
||||
{
|
||||
const void *here = __builtin_return_address(0);
|
||||
int ret;
|
||||
|
||||
/* Set up or get a connection record and set the protocol parameters,
|
||||
* including channel number and call ID.
|
||||
*/
|
||||
ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
|
||||
here, NULL);
|
||||
|
||||
rxrpc_start_call_timer(call);
|
||||
|
||||
_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
|
||||
|
||||
if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
|
||||
rxrpc_queue_call(call);
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
|
||||
error:
|
||||
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
|
||||
RX_CALL_DEAD, ret);
|
||||
trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
|
||||
here, ERR_PTR(ret));
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up an incoming call. call->conn points to the connection.
|
||||
* This is called in BH context and isn't allowed to fail.
|
||||
@@ -533,61 +491,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* Prepare a kernel service call for retry.
|
||||
*/
|
||||
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
{
|
||||
const void *here = __builtin_return_address(0);
|
||||
int i;
|
||||
u8 last = 0;
|
||||
|
||||
_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
|
||||
|
||||
trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
|
||||
here, (const void *)call->flags);
|
||||
|
||||
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
|
||||
ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
|
||||
ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
|
||||
ASSERT(list_empty(&call->recvmsg_link));
|
||||
|
||||
del_timer_sync(&call->timer);
|
||||
|
||||
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
|
||||
|
||||
if (call->conn)
|
||||
rxrpc_disconnect_call(call);
|
||||
|
||||
if (rxrpc_is_service_call(call) ||
|
||||
!call->tx_phase ||
|
||||
call->tx_hard_ack != 0 ||
|
||||
call->rx_hard_ack != 0 ||
|
||||
call->rx_top != 0)
|
||||
return -EINVAL;
|
||||
|
||||
call->state = RXRPC_CALL_UNINITIALISED;
|
||||
call->completion = RXRPC_CALL_SUCCEEDED;
|
||||
call->call_id = 0;
|
||||
call->cid = 0;
|
||||
call->cong_cwnd = 0;
|
||||
call->cong_extra = 0;
|
||||
call->cong_ssthresh = 0;
|
||||
call->cong_mode = 0;
|
||||
call->cong_dup_acks = 0;
|
||||
call->cong_cumul_acks = 0;
|
||||
call->acks_lowest_nak = 0;
|
||||
|
||||
for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
|
||||
last |= call->rxtx_annotations[i];
|
||||
call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
|
||||
call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
|
||||
}
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* release all the calls associated with a socket
|
||||
*/
|
||||
|
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
|
||||
clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
|
||||
|
||||
write_lock_bh(&call->state_lock);
|
||||
if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
|
||||
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
|
||||
else
|
||||
call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
|
||||
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
|
||||
write_unlock_bh(&call->state_lock);
|
||||
|
||||
rxrpc_see_call(call);
|
||||
|
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
|
||||
|
||||
ASSERTCMP(seq, ==, call->tx_top + 1);
|
||||
|
||||
if (last) {
|
||||
if (last)
|
||||
annotation |= RXRPC_TX_ANNO_LAST;
|
||||
set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
|
||||
}
|
||||
|
||||
/* We have to set the timestamp before queueing as the retransmit
|
||||
* algorithm can see the packet as soon as we queue it.
|
||||
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
|
||||
call->tx_total_len -= copy;
|
||||
}
|
||||
|
||||
/* check for the far side aborting the call or a network error
|
||||
* occurring */
|
||||
if (call->state == RXRPC_CALL_COMPLETE)
|
||||
goto call_terminated;
|
||||
|
||||
/* add the packet to the send queue if it's now full */
|
||||
if (sp->remain <= 0 ||
|
||||
(msg_data_left(msg) == 0 && !more)) {
|
||||
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
|
||||
notify_end_tx);
|
||||
skb = NULL;
|
||||
}
|
||||
|
||||
/* Check for the far side aborting the call or a network error
|
||||
* occurring. If this happens, save any packet that was under
|
||||
* construction so that in the case of a network error, the
|
||||
* call can be retried or redirected.
|
||||
*/
|
||||
if (call->state == RXRPC_CALL_COMPLETE) {
|
||||
ret = call->error;
|
||||
goto out;
|
||||
}
|
||||
} while (msg_data_left(msg) > 0);
|
||||
|
||||
success:
|
||||
@@ -444,6 +437,11 @@ out:
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
|
||||
call_terminated:
|
||||
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
|
||||
_leave(" = %d", call->error);
|
||||
return call->error;
|
||||
|
||||
maybe_error:
|
||||
if (copied)
|
||||
goto success;
|
||||
|
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
|
||||
[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
|
||||
};
|
||||
|
||||
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
|
||||
{
|
||||
if (!p)
|
||||
return;
|
||||
if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
||||
dst_release(&p->tcft_enc_metadata->dst);
|
||||
kfree_rcu(p, rcu);
|
||||
}
|
||||
|
||||
static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
||||
struct nlattr *est, struct tc_action **a,
|
||||
int ovr, int bind, bool rtnl_held,
|
||||
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
||||
rcu_swap_protected(t->params, params_new,
|
||||
lockdep_is_held(&t->tcf_lock));
|
||||
spin_unlock_bh(&t->tcf_lock);
|
||||
if (params_new)
|
||||
kfree_rcu(params_new, rcu);
|
||||
tunnel_key_release_params(params_new);
|
||||
|
||||
if (ret == ACT_P_CREATED)
|
||||
tcf_idr_insert(tn, *a);
|
||||
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
|
||||
struct tcf_tunnel_key_params *params;
|
||||
|
||||
params = rcu_dereference_protected(t->params, 1);
|
||||
if (params) {
|
||||
if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
||||
dst_release(¶ms->tcft_enc_metadata->dst);
|
||||
|
||||
kfree_rcu(params, rcu);
|
||||
}
|
||||
tunnel_key_release_params(params);
|
||||
}
|
||||
|
||||
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
|
||||
|
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
|
||||
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
__be16 protocol = tc_skb_protocol(skb);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
const int max_reclassify_loop = 4;
|
||||
const struct tcf_proto *orig_tp = tp;
|
||||
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
reclassify:
|
||||
#endif
|
||||
for (; tp; tp = rcu_dereference_bh(tp->next)) {
|
||||
__be16 protocol = tc_skb_protocol(skb);
|
||||
int err;
|
||||
|
||||
if (tp->protocol != protocol &&
|
||||
@@ -1319,7 +1319,6 @@ reset:
|
||||
}
|
||||
|
||||
tp = first_tp;
|
||||
protocol = tc_skb_protocol(skb);
|
||||
goto reclassify;
|
||||
#endif
|
||||
}
|
||||
|
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
||||
struct cls_fl_head *head = rtnl_dereference(tp->root);
|
||||
struct cls_fl_filter *fold = *arg;
|
||||
struct cls_fl_filter *fnew;
|
||||
struct fl_flow_mask *mask;
|
||||
struct nlattr **tb;
|
||||
struct fl_flow_mask mask = {};
|
||||
int err;
|
||||
|
||||
if (!tca[TCA_OPTIONS])
|
||||
return -EINVAL;
|
||||
|
||||
tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
|
||||
if (!tb)
|
||||
mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
|
||||
if (!mask)
|
||||
return -ENOBUFS;
|
||||
|
||||
tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
|
||||
if (!tb) {
|
||||
err = -ENOBUFS;
|
||||
goto errout_mask_alloc;
|
||||
}
|
||||
|
||||
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
|
||||
fl_policy, NULL);
|
||||
if (err < 0)
|
||||
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
||||
}
|
||||
}
|
||||
|
||||
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
|
||||
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
|
||||
tp->chain->tmplt_priv, extack);
|
||||
if (err)
|
||||
goto errout_idr;
|
||||
|
||||
err = fl_check_assign_mask(head, fnew, fold, &mask);
|
||||
err = fl_check_assign_mask(head, fnew, fold, mask);
|
||||
if (err)
|
||||
goto errout_idr;
|
||||
|
||||
@@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
|
||||
}
|
||||
|
||||
kfree(tb);
|
||||
kfree(mask);
|
||||
return 0;
|
||||
|
||||
errout_mask:
|
||||
@@ -1405,6 +1412,8 @@ errout:
|
||||
kfree(fnew);
|
||||
errout_tb:
|
||||
kfree(tb);
|
||||
errout_mask_alloc:
|
||||
kfree(mask);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
|
||||
struct sk_buff *segs, *nskb;
|
||||
netdev_features_t features = netif_skb_features(skb);
|
||||
unsigned int slen = 0;
|
||||
unsigned int slen = 0, numsegs = 0;
|
||||
|
||||
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
|
||||
if (IS_ERR_OR_NULL(segs))
|
||||
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
flow_queue_add(flow, segs);
|
||||
|
||||
sch->q.qlen++;
|
||||
numsegs++;
|
||||
slen += segs->len;
|
||||
q->buffer_used += segs->truesize;
|
||||
b->packets++;
|
||||
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
sch->qstats.backlog += slen;
|
||||
q->avg_window_bytes += slen;
|
||||
|
||||
qdisc_tree_reduce_backlog(sch, 1, len);
|
||||
qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
|
||||
consume_skb(skb);
|
||||
} else {
|
||||
/* not splitting */
|
||||
|
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct Qdisc *child,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
int err;
|
||||
|
||||
err = child->ops->enqueue(skb, child, to_free);
|
||||
if (err != NET_XMIT_SUCCESS)
|
||||
return err;
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
|
||||
return NET_XMIT_SUCCESS;
|
||||
|
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
|
||||
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
struct drr_sched *q = qdisc_priv(sch);
|
||||
struct drr_class *cl;
|
||||
int err = 0;
|
||||
bool first;
|
||||
|
||||
cl = drr_classify(skb, sch, &err);
|
||||
if (cl == NULL) {
|
||||
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
return err;
|
||||
}
|
||||
|
||||
first = !cl->qdisc->q.qlen;
|
||||
err = qdisc_enqueue(skb, cl->qdisc, to_free);
|
||||
if (unlikely(err != NET_XMIT_SUCCESS)) {
|
||||
if (net_xmit_drop_count(err)) {
|
||||
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
return err;
|
||||
}
|
||||
|
||||
if (cl->qdisc->q.qlen == 1) {
|
||||
if (first) {
|
||||
list_add_tail(&cl->alist, &q->active);
|
||||
cl->deficit = cl->quantum;
|
||||
}
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
return err;
|
||||
}
|
||||
|
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
|
||||
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
struct dsmark_qdisc_data *p = qdisc_priv(sch);
|
||||
int err;
|
||||
|
||||
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
return err;
|
||||
}
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
|
||||
return NET_XMIT_SUCCESS;
|
||||
|
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
|
||||
static int
|
||||
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
struct hfsc_class *cl;
|
||||
int uninitialized_var(err);
|
||||
bool first;
|
||||
|
||||
cl = hfsc_classify(skb, sch, &err);
|
||||
if (cl == NULL) {
|
||||
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
return err;
|
||||
}
|
||||
|
||||
first = !cl->qdisc->q.qlen;
|
||||
err = qdisc_enqueue(skb, cl->qdisc, to_free);
|
||||
if (unlikely(err != NET_XMIT_SUCCESS)) {
|
||||
if (net_xmit_drop_count(err)) {
|
||||
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (cl->qdisc->q.qlen == 1) {
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
|
||||
if (first) {
|
||||
if (cl->cl_flags & HFSC_RSC)
|
||||
init_ed(cl, len);
|
||||
if (cl->cl_flags & HFSC_FSC)
|
||||
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
|
||||
}
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
|
||||
return NET_XMIT_SUCCESS;
|
||||
|
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
int uninitialized_var(ret);
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
struct htb_sched *q = qdisc_priv(sch);
|
||||
struct htb_class *cl = htb_classify(skb, sch, &ret);
|
||||
|
||||
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
htb_activate(q, cl);
|
||||
}
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
return NET_XMIT_SUCCESS;
|
||||
}
|
||||
|
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
|
||||
static int
|
||||
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
struct Qdisc *qdisc;
|
||||
int ret;
|
||||
|
||||
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
|
||||
|
||||
ret = qdisc_enqueue(skb, qdisc, to_free);
|
||||
if (ret == NET_XMIT_SUCCESS) {
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
return NET_XMIT_SUCCESS;
|
||||
}
|
||||
|
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
|
||||
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
unsigned int len = qdisc_pkt_len(skb), gso_segs;
|
||||
struct qfq_sched *q = qdisc_priv(sch);
|
||||
struct qfq_class *cl;
|
||||
struct qfq_aggregate *agg;
|
||||
int err = 0;
|
||||
bool first;
|
||||
|
||||
cl = qfq_classify(skb, sch, &err);
|
||||
if (cl == NULL) {
|
||||
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
}
|
||||
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
|
||||
|
||||
if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
|
||||
if (unlikely(cl->agg->lmax < len)) {
|
||||
pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
|
||||
cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
|
||||
err = qfq_change_agg(sch, cl, cl->agg->class_weight,
|
||||
qdisc_pkt_len(skb));
|
||||
cl->agg->lmax, len, cl->common.classid);
|
||||
err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
|
||||
if (err) {
|
||||
cl->qstats.drops++;
|
||||
return qdisc_drop(skb, sch, to_free);
|
||||
}
|
||||
}
|
||||
|
||||
gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
|
||||
first = !cl->qdisc->q.qlen;
|
||||
err = qdisc_enqueue(skb, cl->qdisc, to_free);
|
||||
if (unlikely(err != NET_XMIT_SUCCESS)) {
|
||||
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
|
||||
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
return err;
|
||||
}
|
||||
|
||||
bstats_update(&cl->bstats, skb);
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
cl->bstats.bytes += len;
|
||||
cl->bstats.packets += gso_segs;
|
||||
sch->qstats.backlog += len;
|
||||
++sch->q.qlen;
|
||||
|
||||
agg = cl->agg;
|
||||
/* if the queue was not empty, then done here */
|
||||
if (cl->qdisc->q.qlen != 1) {
|
||||
if (!first) {
|
||||
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
|
||||
list_first_entry(&agg->active, struct qfq_class, alist)
|
||||
== cl && cl->deficit < qdisc_pkt_len(skb))
|
||||
== cl && cl->deficit < len)
|
||||
list_move_tail(&cl->alist, &agg->active);
|
||||
|
||||
return err;
|
||||
|
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct tbf_sched_data *q = qdisc_priv(sch);
|
||||
unsigned int len = qdisc_pkt_len(skb);
|
||||
int ret;
|
||||
|
||||
if (qdisc_pkt_len(skb) > q->max_size) {
|
||||
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
return ret;
|
||||
}
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
sch->qstats.backlog += len;
|
||||
sch->q.qlen++;
|
||||
return NET_XMIT_SUCCESS;
|
||||
}
|
||||
|
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
|
||||
|
||||
switch (ev) {
|
||||
case NETDEV_UP:
|
||||
addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
|
||||
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
||||
if (addr) {
|
||||
addr->a.v6.sin6_family = AF_INET6;
|
||||
addr->a.v6.sin6_port = 0;
|
||||
addr->a.v6.sin6_flowinfo = 0;
|
||||
addr->a.v6.sin6_addr = ifa->addr;
|
||||
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
|
||||
addr->valid = 1;
|
||||
@@ -434,7 +432,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
|
||||
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
||||
if (addr) {
|
||||
addr->a.v6.sin6_family = AF_INET6;
|
||||
addr->a.v6.sin6_port = 0;
|
||||
addr->a.v6.sin6_addr = ifp->addr;
|
||||
addr->a.v6.sin6_scope_id = dev->ifindex;
|
||||
addr->valid = 1;
|
||||
|
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
|
||||
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
||||
if (addr) {
|
||||
addr->a.v4.sin_family = AF_INET;
|
||||
addr->a.v4.sin_port = 0;
|
||||
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
|
||||
addr->valid = 1;
|
||||
INIT_LIST_HEAD(&addr->list);
|
||||
@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
|
||||
|
||||
switch (ev) {
|
||||
case NETDEV_UP:
|
||||
addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
|
||||
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
|
||||
if (addr) {
|
||||
addr->a.v4.sin_family = AF_INET;
|
||||
addr->a.v4.sin_port = 0;
|
||||
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
|
||||
addr->valid = 1;
|
||||
spin_lock_bh(&net->sctp.local_addr_lock);
|
||||
|
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
|
||||
return limit;
|
||||
}
|
||||
|
||||
static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
|
||||
{
|
||||
return TLV_GET_LEN(tlv) - TLV_SPACE(0);
|
||||
}
|
||||
|
||||
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
|
||||
{
|
||||
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
|
||||
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
|
||||
return buf;
|
||||
}
|
||||
|
||||
static inline bool string_is_valid(char *s, int len)
|
||||
{
|
||||
return memchr(s, '\0', len) ? true : false;
|
||||
}
|
||||
|
||||
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
|
||||
struct tipc_nl_compat_msg *msg,
|
||||
struct sk_buff *arg)
|
||||
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
struct nlattr *prop;
|
||||
struct nlattr *bearer;
|
||||
struct tipc_bearer_config *b;
|
||||
int len;
|
||||
|
||||
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
|
||||
|
||||
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
if (!bearer)
|
||||
return -EMSGSIZE;
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
|
||||
if (!string_is_valid(b->name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
|
||||
return -EMSGSIZE;
|
||||
|
||||
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
{
|
||||
char *name;
|
||||
struct nlattr *bearer;
|
||||
int len;
|
||||
|
||||
name = (char *)TLV_DATA(msg->req);
|
||||
|
||||
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
if (!bearer)
|
||||
return -EMSGSIZE;
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
|
||||
if (!string_is_valid(name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
|
||||
return -EMSGSIZE;
|
||||
|
||||
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
|
||||
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
|
||||
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
|
||||
int err;
|
||||
int len;
|
||||
|
||||
if (!attrs[TIPC_NLA_LINK])
|
||||
return -EINVAL;
|
||||
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
|
||||
return err;
|
||||
|
||||
name = (char *)TLV_DATA(msg->req);
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
||||
if (!string_is_valid(name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
|
||||
return 0;
|
||||
|
||||
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
|
||||
struct nlattr *prop;
|
||||
struct nlattr *media;
|
||||
struct tipc_link_config *lc;
|
||||
int len;
|
||||
|
||||
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
||||
|
||||
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
|
||||
if (!media)
|
||||
return -EMSGSIZE;
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
|
||||
if (!string_is_valid(lc->name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
|
||||
return -EMSGSIZE;
|
||||
|
||||
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
|
||||
struct nlattr *prop;
|
||||
struct nlattr *bearer;
|
||||
struct tipc_link_config *lc;
|
||||
int len;
|
||||
|
||||
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
||||
|
||||
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
|
||||
if (!bearer)
|
||||
return -EMSGSIZE;
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
|
||||
if (!string_is_valid(lc->name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
|
||||
return -EMSGSIZE;
|
||||
|
||||
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
struct tipc_link_config *lc;
|
||||
struct tipc_bearer *bearer;
|
||||
struct tipc_media *media;
|
||||
int len;
|
||||
|
||||
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
||||
if (!string_is_valid(lc->name, len))
|
||||
return -EINVAL;
|
||||
|
||||
media = tipc_media_find(lc->name);
|
||||
if (media) {
|
||||
cmd->doit = &__tipc_nl_media_set;
|
||||
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
{
|
||||
char *name;
|
||||
struct nlattr *link;
|
||||
int len;
|
||||
|
||||
name = (char *)TLV_DATA(msg->req);
|
||||
|
||||
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
|
||||
if (!link)
|
||||
return -EMSGSIZE;
|
||||
|
||||
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
|
||||
if (!string_is_valid(name, len))
|
||||
return -EINVAL;
|
||||
|
||||
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
|
||||
return -EMSGSIZE;
|
||||
|
||||
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
|
||||
};
|
||||
|
||||
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
|
||||
if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
|
||||
return -EINVAL;
|
||||
|
||||
depth = ntohl(ntq->depth);
|
||||
|
||||
@@ -1208,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
|
||||
}
|
||||
|
||||
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
|
||||
if (len && !TLV_OK(msg.req, len)) {
|
||||
if (!len || !TLV_OK(msg.req, len)) {
|
||||
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
|
||||
err = -EOPNOTSUPP;
|
||||
goto send;
|
||||
|
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
|
||||
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
|
||||
if (ret == -EWOULDBLOCK)
|
||||
return -EWOULDBLOCK;
|
||||
if (ret > 0) {
|
||||
if (ret == sizeof(s)) {
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
ret = tipc_conn_rcv_sub(srv, con, &s);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
|
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
|
||||
* not know if the device has more tx queues than rx, or the opposite.
|
||||
* This might also change during run time.
|
||||
*/
|
||||
static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
|
||||
u16 queue_id)
|
||||
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
|
||||
u16 queue_id)
|
||||
{
|
||||
if (queue_id >= max_t(unsigned int,
|
||||
dev->real_num_rx_queues,
|
||||
dev->real_num_tx_queues))
|
||||
return -EINVAL;
|
||||
|
||||
if (queue_id < dev->real_num_rx_queues)
|
||||
dev->_rx[queue_id].umem = umem;
|
||||
if (queue_id < dev->real_num_tx_queues)
|
||||
dev->_tx[queue_id].umem = umem;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
|
||||
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
||||
goto out_rtnl_unlock;
|
||||
}
|
||||
|
||||
xdp_reg_umem_at_qid(dev, umem, queue_id);
|
||||
err = xdp_reg_umem_at_qid(dev, umem, queue_id);
|
||||
if (err)
|
||||
goto out_rtnl_unlock;
|
||||
|
||||
umem->dev = dev;
|
||||
umem->queue_id = queue_id;
|
||||
if (force_copy)