Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three trivial overlapping conflicts.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller
2019-05-02 22:14:21 -04:00
167 changed files with 1378 additions and 730 deletions

--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c

@@ -1915,6 +1915,7 @@ static int __init atalk_init(void)
 	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
 	if (!ddp_dl) {
 		pr_crit("Unable to register DDP with SNAP.\n");
+		rc = -ENOMEM;
 		goto out_sock;
 	}

--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c

@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
 	tail[plen - 1] = proto;
 }
 
-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
 	int encap_type;
 	struct udphdr *uh;
@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 	__be16 sport, dport;
 	struct xfrm_encap_tmpl *encap = x->encap;
 	struct ip_esp_hdr *esph = esp->esph;
+	unsigned int len;
 
 	spin_lock_bh(&x->lock);
 	sport = encap->encap_sport;
@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 	encap_type = encap->encap_type;
 	spin_unlock_bh(&x->lock);
 
+	len = skb->len + esp->tailen - skb_transport_offset(skb);
+	if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
+		return -EMSGSIZE;
+
 	uh = (struct udphdr *)esph;
 	uh->source = sport;
 	uh->dest = dport;
-	uh->len = htons(skb->len + esp->tailen
-			- skb_transport_offset(skb));
+	uh->len = htons(len);
 	uh->check = 0;
 
 	switch (encap_type) {
@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 	*skb_mac_header(skb) = IPPROTO_UDP;
 	esp->esph = esph;
+
+	return 0;
 }
 
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 	int tailen = esp->tailen;
 
 	/* this is non-NULL only with UDP Encapsulation */
-	if (x->encap)
-		esp_output_udp_encap(x, skb, esp);
+	if (x->encap) {
+		int err = esp_output_udp_encap(x, skb, esp);
+
+		if (err < 0)
+			return err;
+	}
 
 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_tailroom(skb)) {

--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c

@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 			goto out;
 
 		if (sp->len == XFRM_MAX_DEPTH)
-			goto out;
+			goto out_reset;
 
 		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
 				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
 				      spi, IPPROTO_ESP, AF_INET);
 		if (!x)
-			goto out;
+			goto out_reset;
 
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 		xo = xfrm_offload(skb);
 		if (!xo) {
 			xfrm_state_put(x);
-			goto out;
+			goto out_reset;
 		}
 	}
@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
 	xfrm_input(skb, IPPROTO_ESP, spi, -2);
 
 	return ERR_PTR(-EINPROGRESS);
+out_reset:
+	secpath_reset(skb);
 out:
 	skb_push(skb, offset);
 	NAPI_GRO_CB(skb)->same_flow = 0;

--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c

@@ -516,6 +516,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->pkt_type = from->pkt_type;
 	to->priority = from->priority;
 	to->protocol = from->protocol;
+	to->skb_iif = from->skb_iif;
 	skb_dst_drop(to);
 	skb_dst_copy(to, from);
 	to->dev = from->dev;

--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c

@@ -630,10 +630,8 @@ static int __init vti_init(void)
 
 	msg = "ipip tunnel";
 	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
-	if (err < 0) {
-		pr_info("%s: cant't register tunnel\n",__func__);
+	if (err < 0)
 		goto xfrm_tunnel_failed;
-	}
 
 	msg = "netlink interface";
 	err = rtnl_link_register(&vti_link_ops);
@@ -643,9 +641,9 @@ static int __init vti_init(void)
 	return err;
 
 rtnl_link_failed:
-	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
 	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 xfrm_proto_comp_failed:
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
@@ -660,6 +658,7 @@ pernet_dev_failed:
 static void __exit vti_fini(void)
 {
 	rtnl_link_unregister(&vti_link_ops);
+	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
 	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);

--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c

@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
 	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
 	    ((TCP_SKB_CB(tail)->tcp_flags |
-	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
+	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
+	    !((TCP_SKB_CB(tail)->tcp_flags &
+	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
 	    ((TCP_SKB_CB(tail)->tcp_flags ^
 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
 #ifdef CONFIG_TLS_DEVICE
@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
 			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
 
+		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
+		 * thtail->fin, so that the fast path in tcp_rcv_established()
+		 * is not entered if we append a packet with a FIN.
+		 * SYN, RST, URG are not present.
+		 * ACK is set on both packets.
+		 * PSH : we do not really care in TCP stack,
+		 *       at least for 'GRO' packets.
+		 */
+		thtail->fin |= th->fin;
 		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
 
 		if (TCP_SKB_CB(skb)->has_rxtstamp) {

--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c

@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 	struct sk_buff *pp = NULL;
 	struct udphdr *uh2;
 	struct sk_buff *p;
+	unsigned int ulen;
 
 	/* requires non zero csum, for symmetry with GSO */
 	if (!uh->check) {
@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 		return NULL;
 	}
 
+	/* Do not deal with padded or malicious packets, sorry ! */
+	ulen = ntohs(uh->len);
+	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
 	/* pull encapsulating udp header */
 	skb_gro_pull(skb, sizeof(struct udphdr));
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 
 	/* Terminate the flow on len mismatch or if it grow "too much".
 	 * Under small packet flood GRO count could elsewhere grow a lot
-	 * leading to execessive truesize values
+	 * leading to excessive truesize values.
+	 * On len mismatch merge the first packet shorter than gso_size,
+	 * otherwise complete the GRO packet.
 	 */
-	if (!skb_gro_receive(p, skb) &&
+	if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
+	    ulen != ntohs(uh2->len) ||
 	    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
 		pp = p;
-	else if (uh->len != uh2->len)
-		pp = p;
 
 	return pp;
 }

--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c

@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 			goto out;
 
 		if (sp->len == XFRM_MAX_DEPTH)
-			goto out;
+			goto out_reset;
 
 		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
 				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
 				      spi, IPPROTO_ESP, AF_INET6);
 		if (!x)
-			goto out;
+			goto out_reset;
 
 		sp->xvec[sp->len++] = x;
 		sp->olen++;
@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 		xo = xfrm_offload(skb);
 		if (!xo) {
 			xfrm_state_put(x);
-			goto out;
+			goto out_reset;
 		}
 	}
@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
 	xfrm_input(skb, IPPROTO_ESP, spi, -2);
 
 	return ERR_PTR(-EINPROGRESS);
+out_reset:
+	secpath_reset(skb);
 out:
 	skb_push(skb, offset);
 	NAPI_GRO_CB(skb)->same_flow = 0;

--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c

@@ -916,9 +916,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
 		if (pcpu_rt) {
 			struct fib6_info *from;
 
-			from = rcu_dereference_protected(pcpu_rt->from,
-					lockdep_is_held(&table->tb6_lock));
-			rcu_assign_pointer(pcpu_rt->from, NULL);
+			from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
 			fib6_info_release(from);
 		}
 	}

--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c

@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
 	return fl;
 }
 
+static void fl_free_rcu(struct rcu_head *head)
+{
+	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+	if (fl->share == IPV6_FL_S_PROCESS)
+		put_pid(fl->owner.pid);
+	kfree(fl->opt);
+	kfree(fl);
+}
+
 static void fl_free(struct ip6_flowlabel *fl)
 {
-	if (fl) {
-		if (fl->share == IPV6_FL_S_PROCESS)
-			put_pid(fl->owner.pid);
-		kfree(fl->opt);
-		kfree_rcu(fl, rcu);
-	}
+	if (fl)
+		call_rcu(&fl->rcu, fl_free_rcu);
 }
 
 static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ recheck:
 		if (fl1->share == IPV6_FL_S_EXCL ||
 		    fl1->share != fl->share ||
 		    ((fl1->share == IPV6_FL_S_PROCESS) &&
-		     (fl1->owner.pid == fl->owner.pid)) ||
+		     (fl1->owner.pid != fl->owner.pid)) ||
 		    ((fl1->share == IPV6_FL_S_USER) &&
-		     uid_eq(fl1->owner.uid, fl->owner.uid)))
+		     !uid_eq(fl1->owner.uid, fl->owner.uid)))
 			goto release;
 
 		err = -ENOMEM;

--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c

@@ -380,11 +380,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 		in6_dev_put(idev);
 	}
 
-	rcu_read_lock();
-	from = rcu_dereference(rt->from);
-	rcu_assign_pointer(rt->from, NULL);
+	from = xchg((__force struct fib6_info **)&rt->from, NULL);
 	fib6_info_release(from);
-	rcu_read_unlock();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1323,9 +1320,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
 	/* purge completely the exception to allow releasing the held resources:
 	 * some [sk] cache may keep the dst around for unlimited time
 	 */
-	from = rcu_dereference_protected(rt6_ex->rt6i->from,
-					 lockdep_is_held(&rt6_exception_lock));
-	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
 	fib6_info_release(from);
 	dst_dev_put(&rt6_ex->rt6i->dst);
@@ -3495,11 +3490,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 
 	rcu_read_lock();
 	res.f6i = rcu_dereference(rt->from);
-	/* This fib6_info_hold() is safe here because we hold reference to rt
-	 * and rt already holds reference to fib6_info.
-	 */
-	fib6_info_hold(res.f6i);
-	rcu_read_unlock();
+	if (!res.f6i)
+		goto out;
 
 	res.nh = &res.f6i->fib6_nh;
 	res.fib6_flags = res.f6i->fib6_flags;
@@ -3514,10 +3506,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
-	/* No need to remove rt from the exception table if rt is
-	 * a cached route because rt6_insert_exception() will
-	 * takes care of it
-	 */
+	/* rt6_insert_exception() will take care of duplicated exceptions */
 	if (rt6_insert_exception(nrt, &res)) {
 		dst_release_immediate(&nrt->dst);
 		goto out;
@@ -3530,7 +3519,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
 out:
-	fib6_info_release(res.f6i);
+	rcu_read_unlock();
 	neigh_release(neigh);
 }
 
@@ -3772,23 +3761,34 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 
 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 {
-	int type;
 	struct dst_entry *dst = skb_dst(skb);
+	struct net *net = dev_net(dst->dev);
+	struct inet6_dev *idev;
+	int type;
+
+	if (netif_is_l3_master(skb->dev) &&
+	    dst->dev == net->loopback_dev)
+		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+	else
+		idev = ip6_dst_idev(dst);
+
 	switch (ipstats_mib_noroutes) {
 	case IPSTATS_MIB_INNOROUTES:
 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
 		if (type == IPV6_ADDR_ANY) {
-			IP6_INC_STATS(dev_net(dst->dev),
-				      __in6_dev_get_safely(skb->dev),
-				      IPSTATS_MIB_INADDRERRORS);
+			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
 			break;
 		}
 		/* FALLTHROUGH */
 	case IPSTATS_MIB_OUTNOROUTES:
-		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
-			      ipstats_mib_noroutes);
+		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
 		break;
 	}
+
+	/* Start over by dropping the dst for l3mdev case */
+	if (netif_is_l3_master(skb->dev))
+		skb_dst_drop(skb);
+
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
 	kfree_skb(skb);
 	return 0;
@@ -5056,16 +5056,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
 	rcu_read_lock();
 	from = rcu_dereference(rt->from);
-
-	if (fibmatch)
-		err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
-				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
-				    nlh->nlmsg_seq, 0);
-	else
-		err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
-				    &fl6.saddr, iif, RTM_NEWROUTE,
-				    NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
-				    0);
+	if (from) {
+		if (fibmatch)
+			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
+					    iif, RTM_NEWROUTE,
+					    NETLINK_CB(in_skb).portid,
+					    nlh->nlmsg_seq, 0);
+		else
+			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+					    &fl6.saddr, iif, RTM_NEWROUTE,
+					    NETLINK_CB(in_skb).portid,
+					    nlh->nlmsg_seq, 0);
+	} else {
+		err = -ENETUNREACH;
+	}
 	rcu_read_unlock();
 
 	if (err < 0) {

--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c

@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
 	unsigned int i;
 
 	xfrm_flush_gc();
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+	xfrm_state_flush(net, 0, false, true);
 
 	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
 		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
 	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
 	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
 	unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+	/* Someone maybe has gotten the xfrm6_tunnel_spi.
+	 * So need to wait it.
+	 */
+	rcu_barrier();
 	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
 }

--- a/net/key/af_key.c
+++ b/net/key/af_key.c

@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 	if (rq->sadb_x_ipsecrequest_mode == 0)
 		return -EINVAL;
+	if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+		return -EINVAL;
 
-	t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+	t->id.proto = rq->sadb_x_ipsecrequest_proto;
 
 	if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
 		return -EINVAL;
 	t->mode = mode;

--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c

@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (tunnel->tunnel_id == tunnel_id) {
-			l2tp_tunnel_inc_refcount(tunnel);
+		if (tunnel->tunnel_id == tunnel_id &&
+		    refcount_inc_not_zero(&tunnel->ref_count)) {
 			rcu_read_unlock_bh();
 
 			return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		if (++count > nth) {
-			l2tp_tunnel_inc_refcount(tunnel);
+		if (++count > nth &&
+		    refcount_inc_not_zero(&tunnel->ref_count)) {
 			rcu_read_unlock_bh();
 			return tunnel;
 		}
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct l2tp_tunnel *tunnel;
 
-	tunnel = l2tp_tunnel(sk);
+	tunnel = rcu_dereference_sk_user_data(sk);
 	if (tunnel == NULL)
 		goto pass_up;

--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c

@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
 
 	dir = sdata->vif.debugfs_dir;
 
-	if (!dir)
+	if (IS_ERR_OR_NULL(dir))
 		return;
 
 	sprintf(buf, "netdev:%s", sdata->name);

--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c

@@ -112,8 +112,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 				  IEEE80211_HT_CAP_TX_STBC);
 
 	/* Allow user to configure RX STBC bits */
-	if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC)
-		ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC;
+	if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
+		ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
+				IEEE80211_HT_CAP_RX_STBC;
 
 	/* Allow user to decrease AMPDU factor */
 	if (ht_capa_mask->ampdu_params_info &

--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c

@@ -1907,6 +1907,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
+	if (sdata->vif.txq)
+		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
 	synchronize_rcu();
 
 	if (sdata->dev) {

--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c

@@ -362,8 +362,8 @@ int genl_register_family(struct genl_family *family)
 	} else
 		family->attrbuf = NULL;
 
-	family->id = idr_alloc(&genl_fam_idr, family,
-			       start, end + 1, GFP_KERNEL);
+	family->id = idr_alloc_cyclic(&genl_fam_idr, family,
+				      start, end + 1, GFP_KERNEL);
 	if (family->id < 0) {
 		err = family->id;
 		goto errout_free;

--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c

@@ -2600,8 +2600,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	void *ph;
 	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
 	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+	unsigned char *addr = NULL;
 	int tp_len, size_max;
-	unsigned char *addr;
 	void *data;
 	int len_sum = 0;
 	int status = TP_STATUS_AVAILABLE;
@@ -2612,7 +2612,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2622,10 +2621,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 						sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_put;
+		if (po->sk.sk_socket->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_put;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -2797,7 +2799,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	struct sk_buff *skb;
 	struct net_device *dev;
 	__be16 proto;
-	unsigned char *addr;
+	unsigned char *addr = NULL;
 	int err, reserve = 0;
 	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2814,7 +2816,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
-		addr	= NULL;
 	} else {
 		err = -EINVAL;
 		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2822,10 +2823,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
-		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out_unlock;
+		if (sock->type == SOCK_DGRAM) {
+			if (dev && msg->msg_namelen < dev->addr_len +
+				   offsetof(struct sockaddr_ll, sll_addr))
+				goto out_unlock;
+			addr = saddr->sll_addr;
+		}
 	}
 
 	err = -ENXIO;
@@ -3342,20 +3346,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (msg->msg_name) {
+		int copy_len;
+
 		/* If the address length field is there to be filled
 		 * in, we fill it in now.
 		 */
 		if (sock->type == SOCK_PACKET) {
 			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
 			msg->msg_namelen = sizeof(struct sockaddr_pkt);
+			copy_len = msg->msg_namelen;
 		} else {
 			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
 			msg->msg_namelen = sll->sll_halen +
 				offsetof(struct sockaddr_ll, sll_addr);
+			copy_len = msg->msg_namelen;
+			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+				memset(msg->msg_name +
+				       offsetof(struct sockaddr_ll, sll_addr),
+				       0, sizeof(sll->sll_addr));
+				msg->msg_namelen = sizeof(struct sockaddr_ll);
+			}
 		}
-		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
-		       msg->msg_namelen);
+		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
 	}
 
 	if (pkt_sk(sk)->auxdata) {

--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c

@@ -772,7 +772,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	unsigned long frag_off;
 	unsigned long to_copy;
 	unsigned long copied;
-	uint64_t uncongested = 0;
+	__le64 uncongested = 0;
 	void *addr;
 
 	/* catch completely corrupt packets */
@@ -789,7 +789,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	copied = 0;
 
 	while (copied < RDS_CONG_MAP_BYTES) {
-		uint64_t *src, *dst;
+		__le64 *src, *dst;
 		unsigned int k;
 
 		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -824,9 +824,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
 	}
 
 	/* the congestion map is in little endian order */
-	uncongested = le64_to_cpu(uncongested);
-	rds_cong_map_updated(map, uncongested);
+	rds_cong_map_updated(map, le64_to_cpu(uncongested));
 }
 
 static void rds_ib_process_recv(struct rds_connection *conn,

--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c

@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
-	if (list_empty(&rxnet->calls))
-		return;
+	if (!list_empty(&rxnet->calls)) {
+		write_lock(&rxnet->call_lock);
 
-	write_lock(&rxnet->call_lock);
+		while (!list_empty(&rxnet->calls)) {
+			call = list_entry(rxnet->calls.next,
+					  struct rxrpc_call, link);
+			_debug("Zapping call %p", call);
 
-	while (!list_empty(&rxnet->calls)) {
-		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-		_debug("Zapping call %p", call);
+			rxrpc_see_call(call);
+			list_del_init(&call->link);
 
-		rxrpc_see_call(call);
-		list_del_init(&call->link);
+			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+			       call, atomic_read(&call->usage),
+			       rxrpc_call_states[call->state],
+			       call->flags, call->events);
 
-		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-		       call, atomic_read(&call->usage),
-		       rxrpc_call_states[call->state],
-		       call->flags, call->events);
+			write_unlock(&rxnet->call_lock);
+			cond_resched();
+			write_lock(&rxnet->call_lock);
+		}
 
 		write_unlock(&rxnet->call_lock);
-		cond_resched();
-		write_lock(&rxnet->call_lock);
 	}
 
-	write_unlock(&rxnet->call_lock);
 	atomic_dec(&rxnet->nr_calls);
 	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }

--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c

@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
 }
 
-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
-	struct net *net = sock_net(asoc->base.sk);
-
-	/* Send the next asconf chunk from the addip chunk
-	 * queue.
-	 */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		struct sctp_chunk *asconf = list_entry(entry,
-						       struct sctp_chunk, list);
-		list_del_init(entry);
-
-		/* Hold the chunk until an ASCONF_ACK is received. */
-		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(net, asoc, asconf))
-			sctp_chunk_free(asconf);
-		else
-			asoc->addip_last_asconf = asconf;
-	}
-}
-
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
 			}
 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
 			break;
-		case SCTP_CMD_SEND_NEXT_ASCONF:
-			sctp_cmd_send_asconf(asoc);
-			break;
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;

--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c

@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
 	return SCTP_DISPOSITION_CONSUME;
 }
 
+static enum sctp_disposition sctp_send_next_asconf(
+					struct net *net,
+					const struct sctp_endpoint *ep,
+					struct sctp_association *asoc,
+					const union sctp_subtype type,
+					struct sctp_cmd_seq *commands)
+{
+	struct sctp_chunk *asconf;
+	struct list_head *entry;
+
+	if (list_empty(&asoc->addip_chunk_list))
+		return SCTP_DISPOSITION_CONSUME;
+
+	entry = asoc->addip_chunk_list.next;
+	asconf = list_entry(entry, struct sctp_chunk, list);
+
+	list_del_init(entry);
+	sctp_chunk_hold(asconf);
+	asoc->addip_last_asconf = asconf;
+
+	return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
 /*
  * ADDIP Section 4.3 General rules for address manipulation
  * When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
 					  SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
 		if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-					     asconf_ack)) {
-			/* Successfully processed ASCONF_ACK. We can
-			 * release the next asconf if we have one.
-			 */
-			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
-					SCTP_NULL());
-			return SCTP_DISPOSITION_CONSUME;
-		}
+					     asconf_ack))
+			return sctp_send_next_asconf(net, ep,
+					(struct sctp_association *)asoc,
+						     type, commands);
 
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(struct sctp_errhdr));

--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c

@@ -580,7 +580,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
 	struct strp_msg *rxm = strp_msg(skb);
-	int err = 0, offset = rxm->offset, copy, nsg;
+	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
 	struct sk_buff *skb_iter, *unused;
 	struct scatterlist sg[1];
 	char *orig_buf, *buf;
@@ -611,27 +611,44 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 	else
 		err = 0;
 
-	copy = min_t(int, skb_pagelen(skb) - offset,
-		     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 
-	if (skb->decrypted)
-		skb_store_bits(skb, offset, buf, copy);
+	if (skb_pagelen(skb) > offset) {
+		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
-	offset += copy;
-	buf += copy;
-
-	skb_walk_frags(skb, skb_iter) {
-		copy = min_t(int, skb_iter->len,
-			     rxm->full_len - offset + rxm->offset -
-			     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
-
-		if (skb_iter->decrypted)
-			skb_store_bits(skb_iter, offset, buf, copy);
+		if (skb->decrypted)
+			skb_store_bits(skb, offset, buf, copy);
 
 		offset += copy;
 		buf += copy;
+	}
+
+	pos = skb_pagelen(skb);
+	skb_walk_frags(skb, skb_iter) {
+		int frag_pos;
+
+		/* Practically all frags must belong to msg if reencrypt
+		 * is needed with current strparser and coalescing logic,
+		 * but strparser may "get optimized", so let's be safe.
+		 */
+		if (pos + skb_iter->len <= offset)
+			goto done_with_frag;
+		if (pos >= data_len + rxm->offset)
+			break;
+
+		frag_pos = offset - pos;
+		copy = min_t(int, skb_iter->len - frag_pos,
+			     data_len + rxm->offset - offset);
+
+		if (skb_iter->decrypted)
+			skb_store_bits(skb_iter, frag_pos, buf, copy);
+
+		offset += copy;
+		buf += copy;
+
+done_with_frag:
+		pos += skb_iter->len;
 	}
 
 free_buf:
 	kfree(orig_buf);
 	return err;

--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c

@@ -201,13 +201,14 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 
 	skb_put(nskb, skb->len);
 	memcpy(nskb->data, skb->data, headln);
-	update_chksum(nskb, headln);
 
 	nskb->destructor = skb->destructor;
 	nskb->sk = sk;
 	skb->destructor = NULL;
 	skb->sk = NULL;
 
+	update_chksum(nskb, headln);
+
 	delta = nskb->truesize - skb->truesize;
 	if (likely(delta < 0))
 		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));

--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c

@@ -3769,10 +3769,9 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
 	/*
 	 * The last request may have been received before this
 	 * registration call. Call the driver notifier if
-	 * initiator is USER and user type is CELL_BASE.
+	 * initiator is USER.
 	 */
-	if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
-	    lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+	if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
 		reg_call_notifier(wiphy, lr);
 }

--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c

@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
 	return NULL;
 }
 
-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+					    unsigned short family)
 {
 	struct xfrmi_net *xfrmn;
-	int ifindex;
 	struct xfrm_if *xi;
+	int ifindex = 0;
 
 	if (!secpath_exists(skb) || !skb->dev)
 		return NULL;
 
+	switch (family) {
+	case AF_INET6:
+		ifindex = inet6_sdif(skb);
+		break;
+	case AF_INET:
+		ifindex = inet_sdif(skb);
+		break;
+	}
+
+	if (!ifindex)
+		ifindex = skb->dev->ifindex;
+
 	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
-	ifindex = skb->dev->ifindex;
 
 	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
 		if (ifindex == xi->dev->ifindex &&

--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c

@@ -3519,7 +3519,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 	ifcb = xfrm_if_get_cb();
 
 	if (ifcb) {
-		xi = ifcb->decode_session(skb);
+		xi = ifcb->decode_session(skb, family);
 		if (xi) {
 			if_id = xi->p.if_id;
 			net = xi->net;

--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c

@@ -2340,7 +2340,7 @@ void xfrm_state_fini(struct net *net)
 
 	flush_work(&net->xfrm.state_hash_work);
 	flush_work(&xfrm_state_gc_work);
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+	xfrm_state_flush(net, 0, false, true);
 
 	WARN_ON(!list_empty(&net->xfrm.state_all));

--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c

@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 	ret = verify_policy_dir(p->dir);
 	if (ret)
 		return ret;
-	if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
 		return -EINVAL;
 
 	return 0;
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 			return -EINVAL;
 		}
 
-		switch (ut[i].id.proto) {
-		case IPPROTO_AH:
-		case IPPROTO_ESP:
-		case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
-		case IPPROTO_ROUTING:
-		case IPPROTO_DSTOPTS:
-#endif
-		case IPSEC_PROTO_ANY:
-			break;
-		default:
+		if (!xfrm_id_proto_valid(ut[i].id.proto))
 			return -EINVAL;
-		}
 	}
 
 	return 0;