Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "First batch of fixes in the new merge window:

   1) Double dst_cache free in act_tunnel_key, from Wenxu.

   2) Avoid NULL deref in IN_DEV_MFORWARD() by failing early in the
      ip_route_input_rcu() path, from Paolo Abeni.

   3) Fix appletalk compile regression, from Arnd Bergmann.

   4) If SLAB objects reach the TCP sendpage method we are in serious
      trouble, so put a debugging check there. From Vasily Averin.

   5) Memory leak in hsr layer, from Mao Wenan.

   6) Only test GSO type on GSO packets, from Willem de Bruijn.

   7) Fix crash in xsk_diag_put_umem(), from Eric Dumazet.

   8) Fix VNIC mailbox length in nfp, from Dirk van der Merwe.

   9) Fix race in ipv4 route exception handling, from Xin Long.

  10) Missing DMA memory barrier in hns3 driver, from Jian Shen.

  11) Use after free in __tcf_chain_put(), from Vlad Buslov.

  12) Handle inet_csk_reqsk_queue_add() failures, from Guillaume Nault.

  13) Return value correction when ip_mc_may_pull() fails, from Eric
      Dumazet.

  14) Use after free in x25_device_event(), also from Eric"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits)
  gro_cells: make sure device is up in gro_cells_receive()
  vxlan: test dev->flags & IFF_UP before calling gro_cells_receive()
  net/x25: fix use-after-free in x25_device_event()
  isdn: mISDNinfineon: fix potential NULL pointer dereference
  net: hns3: fix to stop multiple HNS reset due to the AER changes
  ip: fix ip_mc_may_pull() return value
  net: keep refcount warning in reqsk_free()
  net: stmmac: Avoid one more sometimes uninitialized Clang warning
  net: dsa: mv88e6xxx: Set correct interface mode for CPU/DSA ports
  rxrpc: Fix client call queueing, waiting for channel
  tcp: handle inet_csk_reqsk_queue_add() failures
  net: ethernet: sun: Zero initialize class in default case in niu_add_ethtool_tcam_entry
  8139too : Add support for U.S. Robotics USR997901A 10/100 Cardbus NIC
  fou, fou6: avoid uninit-value in gue_err() and gue6_err()
  net: sched: fix potential use-after-free in __tcf_chain_put()
  vhost: silence an unused-variable warning
  vsock/virtio: fix kernel panic from virtio_transport_reset_no_sock
  connector: fix unsafe usage of ->real_parent
  vxlan: do not need BH again in vxlan_cleanup()
  net: hns3: add dma_rmb() for rx description
  ...
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -203,13 +203,9 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
     int __user *sioc_len;
     int __user *iobuf_len;
 
-#ifndef CONFIG_COMPAT
-    compat = 0; /* Just so the compiler _knows_ */
-#endif
-
     switch (cmd) {
     case ATM_GETNAMES:
-        if (compat) {
+        if (IS_ENABLED(CONFIG_COMPAT) && compat) {
 #ifdef CONFIG_COMPAT
             struct compat_atm_iobuf __user *ciobuf = arg;
             compat_uptr_t cbuf;
@@ -253,7 +249,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
             break;
         }
 
-        if (compat) {
+        if (IS_ENABLED(CONFIG_COMPAT) && compat) {
 #ifdef CONFIG_COMPAT
             struct compat_atmif_sioc __user *csioc = arg;
             compat_uptr_t carg;
@@ -417,7 +413,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
         }
         /* fall through */
     default:
-        if (compat) {
+        if (IS_ENABLED(CONFIG_COMPAT) && compat) {
 #ifdef CONFIG_COMPAT
             if (!dev->ops->compat_ioctl) {
                 error = -EINVAL;
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -16,7 +16,7 @@
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
             u32 *retval, u32 *time)
 {
-    struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
+    struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
     enum bpf_cgroup_storage_type stype;
     u64 time_start, time_spent = 0;
     int ret = 0;
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2319,9 +2319,10 @@ static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr)
     return ret;
 }
 
-static int ethtool_get_per_queue_coalesce(struct net_device *dev,
-                      void __user *useraddr,
-                      struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_get_per_queue_coalesce(struct net_device *dev,
+                   void __user *useraddr,
+                   struct ethtool_per_queue_op *per_queue_opt)
 {
     u32 bit;
     int ret;
@@ -2349,9 +2350,10 @@ static int ethtool_get_per_queue_coalesce(struct net_device *dev,
     return 0;
 }
 
-static int ethtool_set_per_queue_coalesce(struct net_device *dev,
-                      void __user *useraddr,
-                      struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_set_per_queue_coalesce(struct net_device *dev,
+                   void __user *useraddr,
+                   struct ethtool_per_queue_op *per_queue_opt)
 {
     u32 bit;
     int i, ret = 0;
@@ -2405,7 +2407,7 @@ roll_back:
     return ret;
 }
 
-static int ethtool_set_per_queue(struct net_device *dev,
+static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
                  void __user *useraddr, u32 sub_cmd)
 {
     struct ethtool_per_queue_op per_queue_opt;
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2804,7 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
     u32 off = skb_mac_header_len(skb);
     int ret;
 
-    if (!skb_is_gso_tcp(skb))
+    if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
         return -ENOTSUPP;
 
     ret = skb_cow(skb, len_diff);
@@ -2845,7 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
     u32 off = skb_mac_header_len(skb);
     int ret;
 
-    if (!skb_is_gso_tcp(skb))
+    if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
         return -ENOTSUPP;
 
     ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2970,7 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
     u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
     int ret;
 
-    if (!skb_is_gso_tcp(skb))
+    if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
         return -ENOTSUPP;
 
     ret = skb_cow(skb, len_diff);
@@ -2999,7 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
     u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
     int ret;
 
-    if (!skb_is_gso_tcp(skb))
+    if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
         return -ENOTSUPP;
 
     ret = skb_unclone(skb, GFP_ATOMIC);
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
     struct net_device *dev = skb->dev;
     struct gro_cell *cell;
+    int res;
 
-    if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
-        return netif_rx(skb);
+    rcu_read_lock();
+    if (unlikely(!(dev->flags & IFF_UP)))
+        goto drop;
+
+    if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
+        res = netif_rx(skb);
+        goto unlock;
+    }
 
     cell = this_cpu_ptr(gcells->cells);
 
     if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
         atomic_long_inc(&dev->rx_dropped);
         kfree_skb(skb);
-        return NET_RX_DROP;
+        res = NET_RX_DROP;
+        goto unlock;
     }
 
     __skb_queue_tail(&cell->napi_skbs, skb);
     if (skb_queue_len(&cell->napi_skbs) == 1)
         napi_schedule(&cell->napi);
-    return NET_RX_SUCCESS;
+
+    res = NET_RX_SUCCESS;
+
+unlock:
+    rcu_read_unlock();
+    return res;
 }
 EXPORT_SYMBOL(gro_cells_receive);
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -625,6 +625,8 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
 
     /* push the encap headers and fix pointers */
     skb_reset_inner_headers(skb);
+    skb_reset_inner_mac_header(skb); /* mac header is not yet set */
+    skb_set_inner_protocol(skb, skb->protocol);
     skb->encapsulation = 1;
     skb_push(skb, len);
     if (ingress)
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -554,6 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
     struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
 
     /* No sk_callback_lock since already detached. */
     strp_stop(&psock->parser.strp);
     strp_done(&psock->parser.strp);
 
+    cancel_work_sync(&psock->work);
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
         && (old_operstate != IF_OPER_UP)) {
         /* Went up */
         hsr->announce_count = 0;
-        hsr->announce_timer.expires = jiffies +
-                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-        add_timer(&hsr->announce_timer);
+        mod_timer(&hsr->announce_timer,
+              jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
     }
 
     if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
 {
     struct hsr_priv *hsr;
     struct hsr_port *master;
+    unsigned long interval;
 
     hsr = from_timer(hsr, t, announce_timer);
 
@@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
                 hsr->protVersion);
         hsr->announce_count++;
 
-        hsr->announce_timer.expires = jiffies +
-                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+        interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
     } else {
         send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
                 hsr->protVersion);
 
-        hsr->announce_timer.expires = jiffies +
-                msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+        interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
     }
 
     if (is_admin_up(master->dev))
-        add_timer(&hsr->announce_timer);
+        mod_timer(&hsr->announce_timer, jiffies + interval);
 
     rcu_read_unlock();
 }
@@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 
     res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
     if (res)
-        return res;
+        goto err_add_port;
 
     res = register_netdevice(hsr_dev);
     if (res)
@@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 fail:
     hsr_for_each_port(hsr, port)
         hsr_del_port(port);
+err_add_port:
+    hsr_del_node(&hsr->self_node_db);
 
     return res;
 }
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
     return 0;
 }
 
+void hsr_del_node(struct list_head *self_node_db)
+{
+    struct hsr_node *node;
+
+    rcu_read_lock();
+    node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+    rcu_read_unlock();
+    if (node) {
+        list_del_rcu(&node->mac_list);
+        kfree(node);
+    }
+}
+
 /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
  * seq_out is used to initialize filtering of outgoing duplicate frames
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
 
 struct hsr_node;
 
+void hsr_del_node(struct list_head *self_node_db);
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
                   u16 seq_out);
 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1024,7 +1024,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
     int ret;
 
     len = sizeof(struct udphdr) + sizeof(struct guehdr);
-    if (!pskb_may_pull(skb, len))
+    if (!pskb_may_pull(skb, transport_offset + len))
         return -EINVAL;
 
     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1059,7 +1059,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
 
     optlen = guehdr->hlen << 2;
 
-    if (!pskb_may_pull(skb, len + optlen))
+    if (!pskb_may_pull(skb, transport_offset + len + optlen))
         return -EINVAL;
 
     guehdr = (struct guehdr *)&udp_hdr(skb)[1];
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -515,9 +515,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
         mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                     - sizeof(struct iphdr) - tunnel_hlen;
     else
-        mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+        mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
-    skb_dst_update_pmtu(skb, mtu);
+    if (skb_valid_dst(skb))
+        skb_dst_update_pmtu(skb, mtu);
 
     if (skb->protocol == htons(ETH_P_IP)) {
         if (!skb_is_gso(skb) &&
@@ -530,9 +531,11 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
     }
 #if IS_ENABLED(CONFIG_IPV6)
     else if (skb->protocol == htons(ETH_P_IPV6)) {
-        struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
+        struct rt6_info *rt6;
         __be32 daddr;
 
+        rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
+                       NULL;
         daddr = md ? dst : tunnel->parms.iph.daddr;
 
         if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1303,6 +1303,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
         if (fnhe->fnhe_daddr == daddr) {
             rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
                 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+            /* set fnhe_daddr to 0 to ensure it won't bind with
+             * new dsts in rt_bind_exception().
+             */
+            fnhe->fnhe_daddr = 0;
             fnhe_flush_routes(fnhe);
             kfree_rcu(fnhe, rcu);
             break;
@@ -2149,12 +2153,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
         int our = 0;
         int err = -EINVAL;
 
-        if (in_dev)
-            our = ip_check_mc_rcu(in_dev, daddr, saddr,
-                          ip_hdr(skb)->protocol);
+        if (!in_dev)
+            return err;
+        our = ip_check_mc_rcu(in_dev, daddr, saddr,
+                      ip_hdr(skb)->protocol);
 
         /* check l3 master if no match yet */
-        if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
+        if (!our && netif_is_l3_slave(dev)) {
             struct in_device *l3_in_dev;
 
             l3_in_dev = __in_dev_get_rcu(skb->dev);
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
         refcount_set(&req->rsk_refcnt, 1);
         tcp_sk(child)->tsoffset = tsoff;
         sock_rps_save_rxhash(child, skb);
-        inet_csk_reqsk_queue_add(sk, req, child);
+        if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+            bh_unlock_sock(child);
+            sock_put(child);
+            child = NULL;
+            reqsk_put(req);
+        }
     } else {
         reqsk_free(req);
     }
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -943,6 +943,10 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
     ssize_t copied;
     long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
+    if (IS_ENABLED(CONFIG_DEBUG_VM) &&
+        WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
+        return -EINVAL;
+
     /* Wait for a connection to finish. One exception is TCP Fast Open
      * (passive side) where data is allowed to be sent before a connection
      * is fully established.
@@ -1933,6 +1937,11 @@ static int tcp_inq_hint(struct sock *sk)
         inq = tp->rcv_nxt - tp->copied_seq;
         release_sock(sk);
     }
+    /* After receiving a FIN, tell the user-space to continue reading
+     * by returning a non-zero inq.
+     */
+    if (inq == 0 && sock_flag(sk, SOCK_DONE))
+        inq = 1;
     return inq;
 }
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6498,7 +6498,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
         af_ops->send_synack(fastopen_sk, dst, &fl, req,
                     &foc, TCP_SYNACK_FASTOPEN);
         /* Add the child socket directly into the accept queue */
-        inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+        if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+            reqsk_fastopen_remove(fastopen_sk, req, false);
+            bh_unlock_sock(fastopen_sk);
+            sock_put(fastopen_sk);
+            reqsk_put(req);
+            goto drop;
+        }
         sk->sk_data_ready(sk);
         bh_unlock_sock(fastopen_sk);
         sock_put(fastopen_sk);
|
||||
int ret;
|
||||
|
||||
len = sizeof(struct udphdr) + sizeof(struct guehdr);
|
||||
if (!pskb_may_pull(skb, len))
|
||||
if (!pskb_may_pull(skb, transport_offset + len))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
@@ -129,7 +129,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
||||
optlen = guehdr->hlen << 2;
|
||||
|
||||
if (!pskb_may_pull(skb, len + optlen))
|
||||
if (!pskb_may_pull(skb, transport_offset + len + optlen))
|
||||
return -EINVAL;
|
||||
|
||||
guehdr = (struct guehdr *)&udp_hdr(skb)[1];
|
||||
|
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
      * normally have to take channel_lock but we do this before anyone else
      * can see the connection.
      */
-    list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+    list_add(&call->chan_wait_link, &candidate->waiting_calls);
 
     if (cp->exclusive) {
         call->conn = candidate;
@@ -432,7 +432,7 @@ found_extant_conn:
     call->conn = conn;
     call->security_ix = conn->security_ix;
     call->service_id = conn->service_id;
-    list_add(&call->chan_wait_link, &conn->waiting_calls);
+    list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
     spin_unlock(&conn->channel_lock);
     _leave(" = 0 [extant %d]", conn->debug_id);
     return 0;
@@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
 
     ret = rxrpc_wait_for_channel(call, gfp);
     if (ret < 0) {
+        trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
         rxrpc_disconnect_client_call(call);
         goto out;
     }
@@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
  */
 void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 {
-    unsigned int channel = call->cid & RXRPC_CHANNELMASK;
     struct rxrpc_connection *conn = call->conn;
-    struct rxrpc_channel *chan = &conn->channels[channel];
+    struct rxrpc_channel *chan = NULL;
     struct rxrpc_net *rxnet = conn->params.local->rxnet;
+    unsigned int channel = -1;
+    u32 cid;
 
-    trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
-    call->conn = NULL;
-
     spin_lock(&conn->channel_lock);
 
+    cid = call->cid;
+    if (cid) {
+        channel = cid & RXRPC_CHANNELMASK;
+        chan = &conn->channels[channel];
+    }
+    trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
+    call->conn = NULL;
+
     /* Calls that have never actually been assigned a channel can simply be
      * discarded. If the conn didn't get used either, it will follow
      * immediately unless someone else grabs it in the meantime.
@@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
         goto out;
     }
 
-    ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
+    if (rcu_access_pointer(chan->call) != call) {
+        spin_unlock(&conn->channel_lock);
+        BUG();
+    }
 
     /* If a client call was exposed to the world, we save the result for
      * retransmission.
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -201,14 +201,9 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
 {
     if (!p)
         return;
-    if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
-#ifdef CONFIG_DST_CACHE
-        struct ip_tunnel_info *info = &p->tcft_enc_metadata->u.tun_info;
-
-        dst_cache_destroy(&info->dst_cache);
-#endif
+    if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
         dst_release(&p->tcft_enc_metadata->dst);
-    }
 
     kfree_rcu(p, rcu);
 }
@@ -338,7 +333,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                        &metadata->u.tun_info,
                        opts_len, extack);
         if (ret < 0)
-            goto release_dst_cache;
+            goto release_tun_meta;
     }
 
     metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -354,14 +349,14 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                      &act_tunnel_key_ops, bind, true);
         if (ret) {
             NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
-            goto release_dst_cache;
+            goto release_tun_meta;
         }
 
         ret = ACT_P_CREATED;
     } else if (!ovr) {
         NL_SET_ERR_MSG(extack, "TC IDR already exists");
         ret = -EEXIST;
-        goto release_dst_cache;
+        goto release_tun_meta;
     }
 
     t = to_tunnel_key(*a);
@@ -371,7 +366,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
         NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
         ret = -ENOMEM;
         exists = true;
-        goto release_dst_cache;
+        goto release_tun_meta;
     }
     params_new->tcft_action = parm->t_action;
     params_new->tcft_enc_metadata = metadata;
@@ -388,12 +383,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
     return ret;
 
-release_dst_cache:
-#ifdef CONFIG_DST_CACHE
-    if (metadata)
-        dst_cache_destroy(&metadata->u.tun_info.dst_cache);
-#endif
 release_tun_meta:
     if (metadata)
         dst_release(&metadata->dst);
 
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -470,10 +470,9 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
 {
     struct tcf_block *block = chain->block;
     const struct tcf_proto_ops *tmplt_ops;
-    bool is_last, free_block = false;
+    bool free_block = false;
     unsigned int refcnt;
     void *tmplt_priv;
-    u32 chain_index;
 
     mutex_lock(&block->lock);
     if (explicitly_created) {
@@ -492,23 +491,21 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
      * save these to temporary variables.
      */
     refcnt = --chain->refcnt;
-    is_last = refcnt - chain->action_refcnt == 0;
     tmplt_ops = chain->tmplt_ops;
     tmplt_priv = chain->tmplt_priv;
-    chain_index = chain->index;
-
-    if (refcnt == 0)
-        free_block = tcf_chain_detach(chain);
-    mutex_unlock(&block->lock);
 
     /* The last dropped non-action reference will trigger notification. */
-    if (is_last && !by_act) {
-        tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain_index,
+    if (refcnt - chain->action_refcnt == 0 && !by_act) {
+        tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                        block, NULL, 0, 0, false);
         /* Last reference to chain, no need to lock. */
         chain->flushing = false;
     }
 
+    if (refcnt == 0)
+        free_block = tcf_chain_detach(chain);
+    mutex_unlock(&block->lock);
+
     if (refcnt == 0) {
         tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
         tcf_chain_destroy(chain, free_block);
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1348,6 +1348,24 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
     if (err < 0)
         goto errout;
 
+    if (tb[TCA_FLOWER_FLAGS]) {
+        fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+        if (!tc_flags_valid(fnew->flags)) {
+            err = -EINVAL;
+            goto errout;
+        }
+    }
+
+    err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
+               tp->chain->tmplt_priv, extack);
+    if (err)
+        goto errout;
+
+    err = fl_check_assign_mask(head, fnew, fold, mask);
+    if (err)
+        goto errout;
+
     if (!handle) {
         handle = 1;
         err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
@@ -1358,36 +1376,18 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                     handle, GFP_KERNEL);
     }
     if (err)
-        goto errout;
+        goto errout_mask;
     fnew->handle = handle;
 
-    if (tb[TCA_FLOWER_FLAGS]) {
-        fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
-
-        if (!tc_flags_valid(fnew->flags)) {
-            err = -EINVAL;
-            goto errout_idr;
-        }
-    }
-
-    err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
-               tp->chain->tmplt_priv, extack);
-    if (err)
-        goto errout_idr;
-
-    err = fl_check_assign_mask(head, fnew, fold, mask);
-    if (err)
-        goto errout_idr;
-
     if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
         err = -EEXIST;
-        goto errout_mask;
+        goto errout_idr;
     }
 
     err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
                      fnew->mask->filter_ht_params);
     if (err)
-        goto errout_mask;
+        goto errout_idr;
 
     if (!tc_skip_hw(fnew->flags)) {
         err = fl_hw_replace_filter(tp, fnew, extack);
@@ -1426,12 +1426,13 @@ errout_mask_ht:
     rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
                    fnew->mask->filter_ht_params);
 
-errout_mask:
-    fl_mask_put(head, fnew->mask, false);
-
 errout_idr:
     if (!fold)
         idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+    fl_mask_put(head, fnew->mask, false);
+
 errout:
     tcf_exts_destroy(&fnew->exts);
     kfree(fnew);
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -471,12 +471,6 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
     struct crypto_shash *tfm = NULL;
     __u16 id;
 
-    /* If AUTH extension is disabled, we are done */
-    if (!ep->auth_enable) {
-        ep->auth_hmacs = NULL;
-        return 0;
-    }
-
     /* If the transforms are already allocated, we are done */
     if (ep->auth_hmacs)
         return 0;
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -107,6 +107,13 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
             auth_chunks->param_hdr.length =
                     htons(sizeof(struct sctp_paramhdr) + 2);
         }
+
+        /* Allocate and initialize transorms arrays for supported
+         * HMACs.
+         */
+        err = sctp_auth_init_hmacs(ep, gfp);
+        if (err)
+            goto nomem;
     }
 
     /* Initialize the base structure. */
@@ -150,15 +157,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
     INIT_LIST_HEAD(&ep->endpoint_shared_keys);
     null_key = sctp_auth_shkey_create(0, gfp);
     if (!null_key)
-        goto nomem;
+        goto nomem_shkey;
 
     list_add(&null_key->key_list, &ep->endpoint_shared_keys);
 
-    /* Allocate and initialize transorms arrays for supported HMACs. */
-    err = sctp_auth_init_hmacs(ep, gfp);
-    if (err)
-        goto nomem_hmacs;
-
     /* Add the null key to the endpoint shared keys list and
      * set the hmcas and chunks pointers.
      */
@@ -169,8 +171,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
     return ep;
 
-nomem_hmacs:
-    sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
 nomem_shkey:
     sctp_auth_destroy_hmacs(ep->auth_hmacs);
 nomem:
     /* Free all allocations */
     kfree(auth_hmacs);
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -102,9 +102,9 @@ static int sctp_send_asconf(struct sctp_association *asoc,
                 struct sctp_chunk *chunk);
 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
 static int sctp_autobind(struct sock *sk);
-static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
-                  struct sctp_association *assoc,
-                  enum sctp_socket_type type);
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+                 struct sctp_association *assoc,
+                 enum sctp_socket_type type);
 
 static unsigned long sctp_memory_pressure;
 static atomic_long_t sctp_memory_allocated;
@@ -4891,7 +4891,11 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
     /* Populate the fields of the newsk from the oldsk and migrate the
      * asoc to the newsk.
      */
-    sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
+    error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
+    if (error) {
+        sk_common_release(newsk);
+        newsk = NULL;
+    }
 
 out:
     release_sock(sk);
@@ -5639,7 +5643,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
     /* Populate the fields of the newsk from the oldsk and migrate the
      * asoc to the newsk.
      */
-    sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+    err = sctp_sock_migrate(sk, sock->sk, asoc,
+                SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+    if (err) {
+        sock_release(sock);
+        sock = NULL;
+    }
 
     *sockp = sock;
@@ -9171,9 +9180,9 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
  * and its messages to the newsk.
  */
-static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
-                  struct sctp_association *assoc,
-                  enum sctp_socket_type type)
+static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+                 struct sctp_association *assoc,
+                 enum sctp_socket_type type)
 {
     struct sctp_sock *oldsp = sctp_sk(oldsk);
     struct sctp_sock *newsp = sctp_sk(newsk);
@@ -9182,6 +9191,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
     struct sk_buff *skb, *tmp;
     struct sctp_ulpevent *event;
     struct sctp_bind_hashbucket *head;
+    int err;
 
     /* Migrate socket buffer sizes and all the socket level options to the
      * new socket.
@@ -9210,8 +9220,20 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
     /* Copy the bind_addr list from the original endpoint to the new
      * endpoint so that we can handle restarts properly
      */
-    sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
-               &oldsp->ep->base.bind_addr, GFP_KERNEL);
+    err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
+                 &oldsp->ep->base.bind_addr, GFP_KERNEL);
+    if (err)
+        return err;
+
+    /* New ep's auth_hmacs should be set if old ep's is set, in case
+     * that net->sctp.auth_enable has been changed to 0 by users and
+     * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
+     */
+    if (oldsp->ep->auth_hmacs) {
+        err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
+        if (err)
+            return err;
+    }
 
     /* Move any messages in the old socket's receive queue that are for the
      * peeled off association to the new socket's receive queue.
@@ -9296,6 +9318,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
     }
 
     release_sock(newsk);
+
+    return 0;
 }
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -230,8 +230,6 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
     for (i = 0; i < stream->outcnt; i++)
         SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
-    sched->init(stream);
-
 in:
     sctp_stream_interleave_init(stream);
     if (!incnt)
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1333,7 +1333,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 
     if (unlikely(!dest)) {
         dest = &tsk->peer;
-        if (!syn || dest->family != AF_TIPC)
+        if (!syn && dest->family != AF_TIPC)
             return -EDESTADDRREQ;
     }
 
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
  */
 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 {
+    const struct virtio_transport *t;
+    struct virtio_vsock_pkt *reply;
     struct virtio_vsock_pkt_info info = {
         .op = VIRTIO_VSOCK_OP_RST,
         .type = le16_to_cpu(pkt->hdr.type),
@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
     if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
         return 0;
 
-    pkt = virtio_transport_alloc_pkt(&info, 0,
-                     le64_to_cpu(pkt->hdr.dst_cid),
-                     le32_to_cpu(pkt->hdr.dst_port),
-                     le64_to_cpu(pkt->hdr.src_cid),
-                     le32_to_cpu(pkt->hdr.src_port));
-    if (!pkt)
+    reply = virtio_transport_alloc_pkt(&info, 0,
+                       le64_to_cpu(pkt->hdr.dst_cid),
+                       le32_to_cpu(pkt->hdr.dst_port),
+                       le64_to_cpu(pkt->hdr.src_cid),
+                       le32_to_cpu(pkt->hdr.src_port));
+    if (!reply)
         return -ENOMEM;
 
-    return virtio_transport_get_ops()->send_pkt(pkt);
+    t = virtio_transport_get_ops();
+    if (!t) {
+        virtio_transport_free_pkt(reply);
+        return -ENOTCONN;
+    }
+
+    return t->send_pkt(reply);
 }
 
 static void virtio_transport_wait_close(struct sock *sk, long timeout)
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -820,8 +820,12 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
     sock->state = SS_CONNECTED;
     rc = 0;
 out_put_neigh:
-    if (rc)
+    if (rc) {
+        read_lock_bh(&x25_list_lock);
         x25_neigh_put(x25->neighbour);
+        x25->neighbour = NULL;
+        read_unlock_bh(&x25_list_lock);
+    }
 out_put_route:
     x25_route_put(rt);
 out:
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
     if (sxdp->sxdp_family != AF_XDP)
         return -EINVAL;
 
+    flags = sxdp->sxdp_flags;
+    if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
+        return -EINVAL;
+
     mutex_lock(&xs->mutex);
     if (xs->dev) {
         err = -EBUSY;
@@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
     }
 
     qid = sxdp->sxdp_queue_id;
-    flags = sxdp->sxdp_flags;
 
     if (flags & XDP_SHARED_UMEM) {
         struct xdp_sock *umem_xs;
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -68,9 +68,9 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
     err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
 
     if (!err && umem->fq)
-        err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_FILL_RING, nlskb);
+        err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
     if (!err && umem->cq) {
-        err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_COMPLETION_RING,
+        err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
                     nlskb);
     }
     return err;
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -174,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
     if (!xskq_is_valid_addr(q, d->addr))
         return false;
 
-    if (((d->addr + d->len) & q->chunk_mask) !=
-        (d->addr & q->chunk_mask)) {
+    if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
+        d->options) {
         q->invalid_descs++;
         return false;
     }