Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Use after free in __dev_map_entry_free(), from Eric Dumazet.

 2) Fix TCP retransmission timestamps on passive Fast Open, from Yuchung Cheng.

 3) Orphan NFC, we'll take the patches directly into my tree. From Johannes Berg.

 4) We can't recycle cloned TCP skbs, from Eric Dumazet.

 5) Some flow dissector bpf test fixes, from Stanislav Fomichev.

 6) Fix RCU marking and warnings in rhashtable, from Herbert Xu.

 7) Fix some potential fib6 leaks, from Eric Dumazet.

 8) Fix a _decode_session4 uninitialized memory read bug fix that got lost in a merge. From Florian Westphal.

 9) Fix ipv6 source address routing wrt. exception route entries, from Wei Wang.

10) The netdev_xmit_more() conversion was not done 100% properly in the mlx5 driver, fix from Tariq Toukan.

11) Clean up botched merge on netfilter kselftest, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (74 commits)
  of_net: fix of_get_mac_address retval if compiled without CONFIG_OF
  net: fix kernel-doc warnings for socket.c
  net: Treat sock->sk_drops as an unsigned int when printing
  kselftests: netfilter: fix leftover net/net-next merge conflict
  mlxsw: core: Prevent reading unsupported slave address from SFP EEPROM
  mlxsw: core: Prevent QSFP module initialization for old hardware
  vsock/virtio: Initialize core virtio vsock before registering the driver
  net/mlx5e: Fix possible modify header actions memory leak
  net/mlx5e: Fix no rewrite fields with the same match
  net/mlx5e: Additional check for flow destination comparison
  net/mlx5e: Add missing ethtool driver info for representors
  net/mlx5e: Fix number of vports for ingress ACL configuration
  net/mlx5e: Fix ethtool rxfh commands when CONFIG_MLX5_EN_RXNFC is disabled
  net/mlx5e: Fix wrong xmit_more application
  net/mlx5: Fix peer pf disable hca command
  net/mlx5: E-Switch, Correct type to u16 for vport_num and int for vport_index
  net/mlx5: Add meaningful return codes to status_to_err function
  net/mlx5: Imply MLXFW in mlx5_core
  Revert "tipc: fix modprobe tipc failed after switch order of device registration"
  vsock/virtio: free packets during the socket release
  ...
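Two of the fixes pulled here (the tipc_init() reordering and the vsock/virtio init reordering visible in the diff below) are the same class of bug: a driver or socket family was registered before the core it depends on was initialized, so a probe or an early packet could hit uninitialized state, and the goto-based error unwind no longer mirrored the init order. A minimal userspace sketch of that convention follows; the subsystem names are hypothetical stand-ins, not code from this merge.

    #include <stdio.h>

    /* Hypothetical init/cleanup pairs standing in for vsock_core_init(),
     * register_virtio_driver(), tipc_socket_init(), and friends. */
    static int core_init(void)          { puts("core init"); return 0; }
    static void core_exit(void)         { puts("core exit"); }
    static int driver_register(void)    { puts("driver register"); return 0; }
    static void driver_unregister(void) { puts("driver unregister"); }

    static int subsys_init(void)
    {
        int err;

        /* Bring the core up first: a probe (or an early packet) may use it
         * as soon as the driver is registered. */
        err = core_init();
        if (err)
            goto out;

        err = driver_register();
        if (err)
            goto out_core;

        return 0;

        /* Error unwind mirrors the init order in reverse. */
    out_core:
        core_exit();
    out:
        return err;
    }

    static void subsys_exit(void)
    {
        /* Tear down in reverse: unregister the driver before the core goes away. */
        driver_unregister();
        core_exit();
    }

    int main(void)
    {
        if (subsys_init())
            return 1;
        subsys_exit();
        return 0;
    }

The unwind labels are kept in reverse init order so each failure point releases exactly what was set up before it, which is why reordering the init calls in tipc_init() and virtio_vsock_init() also required swapping their cleanup labels.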
@@ -26,7 +26,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
 cfsrvl_init(dbg, channel_id, dev_info, false);
 dbg->layer.receive = cfdbgl_receive;
 dbg->layer.transmit = cfdbgl_transmit;
-snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
+snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ, "dbg%d", channel_id);
 return &dbg->layer;
 }

@@ -33,8 +33,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
 cfsrvl_init(dgm, channel_id, dev_info, true);
 dgm->layer.receive = cfdgml_receive;
 dgm->layer.transmit = cfdgml_transmit;
-snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
-dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
+snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ, "dgm%d", channel_id);
 return &dgm->layer;
 }

@@ -33,7 +33,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
 cfsrvl_init(util, channel_id, dev_info, true);
 util->layer.receive = cfutill_receive;
 util->layer.transmit = cfutill_transmit;
-snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
+snprintf(util->layer.name, CAIF_LAYER_NAME_SZ, "util1");
 return &util->layer;
 }

@@ -32,7 +32,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
 cfsrvl_init(vei, channel_id, dev_info, true);
 vei->layer.receive = cfvei_receive;
 vei->layer.transmit = cfvei_transmit;
-snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
+snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ, "vei%d", channel_id);
 return &vei->layer;
 }

@@ -29,7 +29,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
 cfsrvl_init(vid, channel_id, dev_info, false);
 vid->layer.receive = cfvidl_receive;
 vid->layer.transmit = cfvidl_transmit;
-snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
+snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ, "vid1");
 return &vid->layer;
 }
@@ -8927,7 +8927,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
 refcnt = netdev_refcnt_read(dev);
 
-if (time_after(jiffies, warning_time + 10 * HZ)) {
+if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
 dev->name, refcnt);
 warning_time = jiffies;
@@ -54,6 +54,13 @@ void flow_rule_match_vlan(const struct flow_rule *rule,
 }
 EXPORT_SYMBOL(flow_rule_match_vlan);
 
+void flow_rule_match_cvlan(const struct flow_rule *rule,
+struct flow_match_vlan *out)
+{
+FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
+}
+EXPORT_SYMBOL(flow_rule_match_cvlan);
+
 void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
 struct flow_match_ipv4_addrs *out)
 {
@@ -1496,14 +1496,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
 return ret;
 }
 
-static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
+static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
+bool force)
 {
 int ifindex = dev_get_iflink(dev);
 
-if (dev->ifindex == ifindex)
-return 0;
+if (force || dev->ifindex != ifindex)
+return nla_put_u32(skb, IFLA_LINK, ifindex);
 
-return nla_put_u32(skb, IFLA_LINK, ifindex);
+return 0;
 }
 
 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,

@@ -1520,6 +1521,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 const struct net_device *dev,
 struct net *src_net)
 {
+bool put_iflink = false;
+
 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 

@@ -1528,10 +1531,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
 
 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 return -EMSGSIZE;
+
+put_iflink = true;
 }
 }
 
-return 0;
+return nla_put_iflink(skb, dev, put_iflink);
 }
 
 static int rtnl_fill_link_af(struct sk_buff *skb,

@@ -1617,7 +1622,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
 #ifdef CONFIG_RPS
 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 #endif
-nla_put_iflink(skb, dev) ||
 put_master_ifindex(skb, dev) ||
 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
 (dev->qdisc &&
@@ -411,6 +411,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 sk_mem_charge(sk, skb->len);
 copied = skb->len;
 msg->sg.start = 0;
+msg->sg.size = copied;
 msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
 msg->skb = skb;
 

@@ -554,8 +555,10 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
 struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
 
 /* No sk_callback_lock since already detached. */
-strp_stop(&psock->parser.strp);
-strp_done(&psock->parser.strp);
+
+/* Parser has been stopped */
+if (psock->progs.skb_parser)
+strp_done(&psock->parser.strp);
 
 cancel_work_sync(&psock->work);
 
@@ -30,13 +30,11 @@ static int bpfilter_mbox_request(struct sock *sk, int optname,
 mutex_lock(&bpfilter_ops.lock);
 if (!bpfilter_ops.sockopt) {
 mutex_unlock(&bpfilter_ops.lock);
-err = request_module("bpfilter");
+request_module("bpfilter");
 mutex_lock(&bpfilter_ops.lock);
 
-if (err)
-goto out;
 if (!bpfilter_ops.sockopt) {
-err = -ECHILD;
+err = -ENOPROTOOPT;
 goto out;
 }
 }
@@ -1113,7 +1113,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
 __u16 srcp = ntohs(inet->inet_sport);
 
 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
 bucket, src, srcp, dest, destp, sp->sk_state,
 sk_wmem_alloc_get(sp),
 sk_rmem_alloc_get(sp),

@@ -1076,7 +1076,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 srcp = inet->inet_num;
 
 seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
-" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
+" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
 i, src, srcp, dest, destp, sp->sk_state,
 sk_wmem_alloc_get(sp),
 sk_rmem_alloc_get(sp),
@@ -855,7 +855,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 
 if (likely(!size)) {
 skb = sk->sk_tx_skb_cache;
-if (skb && !skb_cloned(skb)) {
+if (skb) {
 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 sk->sk_tx_skb_cache = NULL;
 pskb_trim(skb, 0);
@@ -27,7 +27,10 @@ static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
 int flags, long timeo, int *err)
 {
 DEFINE_WAIT_FUNC(wait, woken_wake_function);
-int ret;
+int ret = 0;
+
+if (!timeo)
+return ret;
 
 add_wait_queue(sk_sleep(sk), &wait);
 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

@@ -528,8 +531,6 @@ static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
 {
 struct sk_psock_link *link;
 
-sk_psock_cork_free(psock);
-__sk_psock_purge_ingress_msg(psock);
 while ((link = sk_psock_link_pop(psock))) {
 sk_psock_unlink(sk, link);
 sk_psock_free_link(link);
@@ -6024,6 +6024,9 @@ reset_and_undo:
 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
 {
 tcp_try_undo_loss(sk, false);
+
+/* Reset rtx states to prevent spurious retransmits_timed_out() */
+tcp_sk(sk)->retrans_stamp = 0;
 inet_csk(sk)->icsk_retransmits = 0;
 
 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
@@ -2883,7 +2883,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 __u16 srcp = ntohs(inet->inet_sport);
 
 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
 bucket, src, srcp, dest, destp, sp->sk_state,
 sk_wmem_alloc_get(sp),
 udp_rqueue_get(sp),

@@ -1034,7 +1034,7 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 src = &sp->sk_v6_rcv_saddr;
 seq_printf(seq,
 "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
+"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
 bucket,
 src->s6_addr32[0], src->s6_addr32[1],
 src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -904,6 +904,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
 {
 int cpu;
 
+/* Make sure rt6_make_pcpu_route() wont add other percpu routes
+ * while we are cleaning them here.
+ */
+f6i->fib6_destroying = 1;
+mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
+
 /* release the reference to this fib entry from
 * all of its cached pcpu routes
 */

@@ -927,6 +933,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 {
 struct fib6_table *table = rt->fib6_table;
 
+if (rt->rt6i_pcpu)
+fib6_drop_pcpu_from(rt, table);
+
 if (refcount_read(&rt->fib6_ref) != 1) {
 /* This route is used as dummy address holder in some split
 * nodes. It is not leaked, but it still holds other resources,

@@ -948,9 +957,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 fn = rcu_dereference_protected(fn->parent,
 lockdep_is_held(&table->tb6_lock));
 }
-
-if (rt->rt6i_pcpu)
-fib6_drop_pcpu_from(rt, table);
 }
 }
 
@@ -111,8 +111,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 int iif, int type, u32 portid, u32 seq,
 unsigned int flags);
 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
-struct in6_addr *daddr,
-struct in6_addr *saddr);
+const struct in6_addr *daddr,
+const struct in6_addr *saddr);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct fib6_info *rt6_add_route_info(struct net *net,

@@ -1295,6 +1295,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
 prev = cmpxchg(p, NULL, pcpu_rt);
 BUG_ON(prev);
 
+if (res->f6i->fib6_destroying) {
+struct fib6_info *from;
+
+from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+fib6_info_release(from);
+}
+
 return pcpu_rt;
 }
 
@@ -1566,31 +1573,44 @@ out:
 * Caller has to hold rcu_read_lock()
 */
 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
-struct in6_addr *daddr,
-struct in6_addr *saddr)
+const struct in6_addr *daddr,
+const struct in6_addr *saddr)
 {
+const struct in6_addr *src_key = NULL;
 struct rt6_exception_bucket *bucket;
-struct in6_addr *src_key = NULL;
 struct rt6_exception *rt6_ex;
 struct rt6_info *ret = NULL;
 
-bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
-
 #ifdef CONFIG_IPV6_SUBTREES
 /* fib6i_src.plen != 0 indicates f6i is in subtree
 * and exception table is indexed by a hash of
 * both fib6_dst and fib6_src.
-* Otherwise, the exception table is indexed by
-* a hash of only fib6_dst.
+* However, the src addr used to create the hash
+* might not be exactly the passed in saddr which
+* is a /128 addr from the flow.
+* So we need to use f6i->fib6_src to redo lookup
+* if the passed in saddr does not find anything.
+* (See the logic in ip6_rt_cache_alloc() on how
+* rt->rt6i_src is updated.)
 */
 if (res->f6i->fib6_src.plen)
 src_key = saddr;
+find_ex:
 #endif
+bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
 
 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
 ret = rt6_ex->rt6i;
 
+#ifdef CONFIG_IPV6_SUBTREES
+/* Use fib6_src as src_key and redo lookup */
+if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
+src_key = &res->f6i->fib6_src.addr;
+goto find_ex;
+}
+#endif
+
 return ret;
 }
 
@@ -2665,12 +2685,10 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
 const struct in6_addr *daddr,
 const struct in6_addr *saddr)
 {
-struct rt6_exception_bucket *bucket;
 const struct fib6_nh *nh = res->nh;
 struct fib6_info *f6i = res->f6i;
-const struct in6_addr *src_key;
-struct rt6_exception *rt6_ex;
 struct inet6_dev *idev;
+struct rt6_info *rt;
 u32 mtu = 0;
 
 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {

@@ -2679,18 +2697,10 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
 goto out;
 }
 
-src_key = NULL;
-#ifdef CONFIG_IPV6_SUBTREES
-if (f6i->fib6_src.plen)
-src_key = saddr;
-#endif
-
-bucket = rcu_dereference(f6i->rt6i_exception_bucket);
-rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
-if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
-mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
-
-if (likely(!mtu)) {
+rt = rt6_find_cached_rt(res, daddr, saddr);
+if (unlikely(rt)) {
+mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
+} else {
 struct net_device *dev = nh->fib_nh_dev;
 
 mtu = IPV6_MIN_MTU;
@@ -2642,7 +2642,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
 struct sock *s = v;
 struct netlink_sock *nlk = nlk_sk(s);
 
-seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8d %-8lu\n",
+seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n",
 s,
 s->sk_protocol,
 nlk->portid,

@@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
 struct pn_sock *pn = pn_sk(sk);
 
 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
-"%d %pK %d",
+"%d %pK %u",
 sk->sk_protocol, pn->sobject, pn->dobject,
 pn->resource, sk->sk_state,
 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
net/socket.c

@@ -645,14 +645,6 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
 
-/**
- * sock_sendmsg - send a message through @sock
- * @sock: socket
- * @msg: message to send
- *
- * Sends @msg through @sock, passing through LSM.
- * Returns the number of bytes sent, or an error code.
- */
 INDIRECT_CALLABLE_DECLARE(int inet_sendmsg(struct socket *, struct msghdr *,
 size_t));
 static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)

@@ -663,6 +655,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 return ret;
 }
 
+/**
+ * sock_sendmsg - send a message through @sock
+ * @sock: socket
+ * @msg: message to send
+ *
+ * Sends @msg through @sock, passing through LSM.
+ * Returns the number of bytes sent, or an error code.
+ */
 int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 {
 int err = security_socket_sendmsg(sock, msg,

@@ -875,15 +875,6 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
-/**
- * sock_recvmsg - receive a message from @sock
- * @sock: socket
- * @msg: message to receive
- * @flags: message flags
- *
- * Receives @msg from @sock, passing through LSM. Returns the total number
- * of bytes received, or an error.
- */
 INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *,
 size_t , int ));
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,

@@ -893,6 +884,15 @@ static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
 msg_data_left(msg), flags);
 }
 
+/**
+ * sock_recvmsg - receive a message from @sock
+ * @sock: socket
+ * @msg: message to receive
+ * @flags: message flags
+ *
+ * Receives @msg from @sock, passing through LSM. Returns the total number
+ * of bytes received, or an error.
+ */
 int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 {
 int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags);
@@ -131,10 +131,6 @@ static int __init tipc_init(void)
 if (err)
 goto out_netlink_compat;
 
-err = tipc_socket_init();
-if (err)
-goto out_socket;
-
 err = tipc_register_sysctl();
 if (err)
 goto out_sysctl;

@@ -143,6 +139,10 @@ static int __init tipc_init(void)
 if (err)
 goto out_pernet;
 
+err = tipc_socket_init();
+if (err)
+goto out_socket;
+
 err = tipc_bearer_setup();
 if (err)
 goto out_bearer;

@@ -150,12 +150,12 @@ static int __init tipc_init(void)
 pr_info("Started in single node mode\n");
 return 0;
 out_bearer:
+tipc_socket_stop();
+out_socket:
 unregister_pernet_subsys(&tipc_net_ops);
 out_pernet:
 tipc_unregister_sysctl();
 out_sysctl:
-tipc_socket_stop();
-out_socket:
 tipc_netlink_compat_stop();
 out_netlink_compat:
 tipc_netlink_stop();

@@ -167,10 +167,10 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
 tipc_bearer_cleanup();
+tipc_socket_stop();
 unregister_pernet_subsys(&tipc_net_ops);
 tipc_netlink_stop();
 tipc_netlink_compat_stop();
-tipc_socket_stop();
 tipc_unregister_sysctl();
 
 pr_info("Deactivated\n");
@@ -35,6 +35,9 @@
 /* The MTU is 16KB per the host side's design */
 #define HVS_MTU_SIZE (1024 * 16)
 
+/* How long to wait for graceful shutdown of a connection */
+#define HVS_CLOSE_TIMEOUT (8 * HZ)
+
 struct vmpipe_proto_header {
 u32 pkt_type;
 u32 data_size;

@@ -305,19 +308,32 @@ static void hvs_channel_cb(void *ctx)
 sk->sk_write_space(sk);
 }
 
+static void hvs_do_close_lock_held(struct vsock_sock *vsk,
+bool cancel_timeout)
+{
+struct sock *sk = sk_vsock(vsk);
+
+sock_set_flag(sk, SOCK_DONE);
+vsk->peer_shutdown = SHUTDOWN_MASK;
+if (vsock_stream_has_data(vsk) <= 0)
+sk->sk_state = TCP_CLOSING;
+sk->sk_state_change(sk);
+if (vsk->close_work_scheduled &&
+(!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+vsk->close_work_scheduled = false;
+vsock_remove_sock(vsk);
+
+/* Release the reference taken while scheduling the timeout */
+sock_put(sk);
+}
+}
+
 static void hvs_close_connection(struct vmbus_channel *chan)
 {
 struct sock *sk = get_per_channel_state(chan);
-struct vsock_sock *vsk = vsock_sk(sk);
 
 lock_sock(sk);
-
-sk->sk_state = TCP_CLOSE;
-sock_set_flag(sk, SOCK_DONE);
-vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
-
-sk->sk_state_change(sk);
-
+hvs_do_close_lock_held(vsock_sk(sk), true);
 release_sock(sk);
 }
 
@@ -452,50 +468,80 @@ static int hvs_connect(struct vsock_sock *vsk)
 return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
 }
 
+static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
+{
+struct vmpipe_proto_header hdr;
+
+if (hvs->fin_sent || !hvs->chan)
+return;
+
+/* It can't fail: see hvs_channel_writable_bytes(). */
+(void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+hvs->fin_sent = true;
+}
+
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
 struct sock *sk = sk_vsock(vsk);
-struct vmpipe_proto_header hdr;
-struct hvs_send_buf *send_buf;
-struct hvsock *hvs;
 
 if (!(mode & SEND_SHUTDOWN))
 return 0;
 
 lock_sock(sk);
-
-hvs = vsk->trans;
-if (hvs->fin_sent)
-goto out;
-
-send_buf = (struct hvs_send_buf *)&hdr;
-
-/* It can't fail: see hvs_channel_writable_bytes(). */
-(void)hvs_send_data(hvs->chan, send_buf, 0);
-
-hvs->fin_sent = true;
-out:
+hvs_shutdown_lock_held(vsk->trans, mode);
 release_sock(sk);
 return 0;
 }
 
+static void hvs_close_timeout(struct work_struct *work)
+{
+struct vsock_sock *vsk =
+container_of(work, struct vsock_sock, close_work.work);
+struct sock *sk = sk_vsock(vsk);
+
+sock_hold(sk);
+lock_sock(sk);
+if (!sock_flag(sk, SOCK_DONE))
+hvs_do_close_lock_held(vsk, false);
+
+vsk->close_work_scheduled = false;
+release_sock(sk);
+sock_put(sk);
+}
+
+/* Returns true, if it is safe to remove socket; false otherwise */
+static bool hvs_close_lock_held(struct vsock_sock *vsk)
+{
+struct sock *sk = sk_vsock(vsk);
+
+if (!(sk->sk_state == TCP_ESTABLISHED ||
+sk->sk_state == TCP_CLOSING))
+return true;
+
+if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
+
+if (sock_flag(sk, SOCK_DONE))
+return true;
+
+/* This reference will be dropped by the delayed close routine */
+sock_hold(sk);
+INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
+vsk->close_work_scheduled = true;
+schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
+return false;
+}
+
 static void hvs_release(struct vsock_sock *vsk)
 {
 struct sock *sk = sk_vsock(vsk);
-struct hvsock *hvs = vsk->trans;
-struct vmbus_channel *chan;
+bool remove_sock;
 
 lock_sock(sk);
-
-sk->sk_state = TCP_CLOSING;
-vsock_remove_sock(vsk);
-
+remove_sock = hvs_close_lock_held(vsk);
 release_sock(sk);
-
-chan = hvs->chan;
-if (chan)
-hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
-
+if (remove_sock)
+vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
@@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
 if (!virtio_vsock_workqueue)
 return -ENOMEM;
 
-ret = register_virtio_driver(&virtio_vsock_driver);
+ret = vsock_core_init(&virtio_transport.transport);
 if (ret)
 goto out_wq;
 
-ret = vsock_core_init(&virtio_transport.transport);
+ret = register_virtio_driver(&virtio_vsock_driver);
 if (ret)
-goto out_vdr;
+goto out_vci;
 
 return 0;
 
-out_vdr:
-unregister_virtio_driver(&virtio_vsock_driver);
+out_vci:
+vsock_core_exit();
 out_wq:
 destroy_workqueue(virtio_vsock_workqueue);
 return ret;
-
 }
 
 static void __exit virtio_vsock_exit(void)
 {
-vsock_core_exit();
 unregister_virtio_driver(&virtio_vsock_driver);
+vsock_core_exit();
 destroy_workqueue(virtio_vsock_workqueue);
 }
 
@@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
 
 void virtio_transport_release(struct vsock_sock *vsk)
 {
+struct virtio_vsock_sock *vvs = vsk->trans;
+struct virtio_vsock_pkt *pkt, *tmp;
 struct sock *sk = &vsk->sk;
 bool remove_sock = true;
 
 lock_sock(sk);
 if (sk->sk_type == SOCK_STREAM)
 remove_sock = virtio_transport_close(vsk);
+
+list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+list_del(&pkt->list);
+virtio_transport_free_pkt(pkt);
+}
 release_sock(sk);
 
 if (remove_sock)
@@ -3264,7 +3264,8 @@ static void
 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 {
 const struct iphdr *iph = ip_hdr(skb);
-u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+int ihl = iph->ihl;
+u8 *xprth = skb_network_header(skb) + ihl * 4;
 struct flowi4 *fl4 = &fl->u.ip4;
 int oif = 0;
 

@@ -3275,6 +3276,11 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 fl4->flowi4_mark = skb->mark;
 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
+fl4->flowi4_proto = iph->protocol;
+fl4->daddr = reverse ? iph->saddr : iph->daddr;
+fl4->saddr = reverse ? iph->daddr : iph->saddr;
+fl4->flowi4_tos = iph->tos;
+
 if (!ip_is_fragment(iph)) {
 switch (iph->protocol) {
 case IPPROTO_UDP:

@@ -3286,7 +3292,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 pskb_may_pull(skb, xprth + 4 - skb->data)) {
 __be16 *ports;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 ports = (__be16 *)xprth;
 
 fl4->fl4_sport = ports[!!reverse];

@@ -3298,7 +3304,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 pskb_may_pull(skb, xprth + 2 - skb->data)) {
 u8 *icmp;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 icmp = xprth;
 
 fl4->fl4_icmp_type = icmp[0];

@@ -3310,7 +3316,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 pskb_may_pull(skb, xprth + 4 - skb->data)) {
 __be32 *ehdr;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 ehdr = (__be32 *)xprth;
 
 fl4->fl4_ipsec_spi = ehdr[0];

@@ -3321,7 +3327,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 pskb_may_pull(skb, xprth + 8 - skb->data)) {
 __be32 *ah_hdr;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 ah_hdr = (__be32 *)xprth;
 
 fl4->fl4_ipsec_spi = ah_hdr[1];

@@ -3332,7 +3338,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 pskb_may_pull(skb, xprth + 4 - skb->data)) {
 __be16 *ipcomp_hdr;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 ipcomp_hdr = (__be16 *)xprth;
 
 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));

@@ -3344,7 +3350,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 __be16 *greflags;
 __be32 *gre_hdr;
 
-xprth = skb_network_header(skb) + iph->ihl * 4;
+xprth = skb_network_header(skb) + ihl * 4;
 greflags = (__be16 *)xprth;
 gre_hdr = (__be32 *)xprth;
 

@@ -3360,10 +3366,6 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 break;
 }
 }
-fl4->flowi4_proto = iph->protocol;
-fl4->daddr = reverse ? iph->saddr : iph->daddr;
-fl4->saddr = reverse ? iph->daddr : iph->saddr;
-fl4->flowi4_tos = iph->tos;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)