Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix hotplug deadlock in hv_netvsc, from Stephen Hemminger.

 2) Fix double-free in rmnet driver, from Dan Carpenter.

 3) INET connection socket layer can double put request sockets, fix
    from Eric Dumazet.

 4) Don't match collect metadata-mode tunnels if the device is down,
    from Haishuang Yan.

 5) Do not perform TSO6/GSO on ipv6 packets with extension headers in
    be2net driver, from Suresh Reddy.

 6) Fix scaling error in gen_estimator, from Eric Dumazet.

 7) Fix 64-bit statistics deadlock in systemport driver, from Florian
    Fainelli.

 8) Fix use-after-free in sctp_sock_dump, from Xin Long.

 9) Reject invalid BPF_END instructions in verifier, from Edward Cree.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (43 commits)
  mlxsw: spectrum_router: Only handle IPv4 and IPv6 events
  Documentation: link in networking docs
  tcp: fix data delivery rate
  bpf/verifier: reject BPF_ALU64|BPF_END
  sctp: do not mark sk dumped when inet_sctp_diag_fill returns err
  sctp: fix an use-after-free issue in sctp_sock_dump
  netvsc: increase default receive buffer size
  tcp: update skb->skb_mstamp more carefully
  net: ipv4: fix l3slave check for index returned in IP_PKTINFO
  net: smsc911x: Quieten netif during suspend
  net: systemport: Fix 64-bit stats deadlock
  net: vrf: avoid gcc-4.6 warning
  qed: remove unnecessary call to memset
  tg3: clean up redundant initialization of tnapi
  tls: make tls_sw_free_resources static
  sctp: potential read out of bounds in sctp_ulpevent_type_enabled()
  MAINTAINERS: review Renesas DT bindings as well
  net_sched: gen_estimator: fix scaling error in bytes/packets samples
  nfp: wait for the NSP resource to appear on boot
  nfp: wait for board state before talking to the NSP
  ...
@@ -2506,21 +2506,19 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	const struct bpf_prog *map_owner = ri->map_owner;
 	struct bpf_map *map = ri->map;
+	struct net_device *fwd = NULL;
 	u32 index = ri->ifindex;
-	struct net_device *fwd;
 	int err;
 
 	ri->ifindex = 0;
 	ri->map = NULL;
 	ri->map_owner = NULL;
 
-	/* This is really only caused by a deliberately crappy
-	 * BPF program, normally we would never hit that case,
-	 * so no need to inform someone via tracepoints either,
-	 * just bail out.
-	 */
-	if (unlikely(map_owner != xdp_prog))
-		return -EINVAL;
+	if (unlikely(map_owner != xdp_prog)) {
+		err = -EFAULT;
+		map = NULL;
+		goto err;
+	}
 
 	fwd = __dev_map_lookup_elem(map, index);
 	if (!fwd) {
@@ -2576,13 +2574,27 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+	const struct bpf_prog *map_owner = ri->map_owner;
+	struct bpf_map *map = ri->map;
+	struct net_device *fwd = NULL;
 	u32 index = ri->ifindex;
-	struct net_device *fwd;
 	unsigned int len;
 	int err = 0;
 
-	fwd = dev_get_by_index_rcu(dev_net(dev), index);
 	ri->ifindex = 0;
+	ri->map = NULL;
+	ri->map_owner = NULL;
+
+	if (map) {
+		if (unlikely(map_owner != xdp_prog)) {
+			err = -EFAULT;
+			map = NULL;
+			goto err;
+		}
+		fwd = __dev_map_lookup_elem(map, index);
+	} else {
+		fwd = dev_get_by_index_rcu(dev_net(dev), index);
+	}
 	if (unlikely(!fwd)) {
 		err = -EINVAL;
 		goto err;
@@ -2600,10 +2612,12 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	skb->dev = fwd;
-	_trace_xdp_redirect(dev, xdp_prog, index);
+	map ? _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index)
+	    : _trace_xdp_redirect(dev, xdp_prog, index);
 	return 0;
 err:
-	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
+	map ? _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err)
+	    : _trace_xdp_redirect_err(dev, xdp_prog, index, err);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
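The three hunks above (net/core/filter.c) teach the generic XDP redirect path about device maps: the per-CPU redirect_info carries the map and the program that installed it, and the consumer trusts ri->map only after checking map_owner against the program currently running; a mismatch means stale state from another program, so it bails out with -EFAULT and fires the error tracepoint instead of dereferencing the map. Below is a minimal userspace sketch of that ownership check; all types and names are illustrative, not kernel API.

    /* Userspace sketch (not kernel code) of the ownership check added above:
     * redirect state is stashed in a per-thread slot by one program and
     * consumed later, so the consumer must verify the state was written by
     * the program it is currently running, or a buggy program could make it
     * dereference a stale map pointer.
     */
    #include <stdio.h>

    struct map;                           /* opaque stand-in for struct bpf_map */
    struct prog { const char *name; };

    struct redirect_state {
        struct map *map;                  /* set by a helper, consumed later */
        const struct prog *map_owner;     /* which program set ->map */
    };

    static _Thread_local struct redirect_state redirect_state;

    static int do_redirect(const struct prog *running, struct map **fwd_map)
    {
        struct redirect_state *ri = &redirect_state;
        struct map *map = ri->map;
        const struct prog *owner = ri->map_owner;

        /* consume-once semantics, as in the kernel hunks */
        ri->map = NULL;
        ri->map_owner = NULL;

        if (owner != running)             /* stale state from another program */
            return -1;                    /* the kernel returns -EFAULT and traces it */

        *fwd_map = map;
        return 0;
    }

    int main(void)
    {
        struct prog a = { "prog_a" }, b = { "prog_b" };
        struct map *m;

        redirect_state.map_owner = &a;    /* prog_a set the state ... */
        printf("consumed by prog_b: %d\n", do_redirect(&b, &m)); /* ... rejected */
        return 0;
    }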
@@ -83,10 +83,10 @@ static void est_timer(unsigned long arg)
 	u64 rate, brate;
 
 	est_fetch_counters(est, &b);
-	brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
+	brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
 	brate -= (est->avbps >> est->ewma_log);
 
-	rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
+	rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
 	rate -= (est->avpps >> est->ewma_log);
 
 	write_seqcount_begin(&est->seq);
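Assuming (as the shift constants imply) that the estimator keeps avbps/avpps as per-second rates scaled by 2^8 and samples every 2^(intvl_log - 2) seconds, a per-interval byte count must be shifted by (8 + 2 - intvl_log) to become a scaled bytes/sec sample; folding in the EWMA weight 2^-ewma_log yields the combined shift (10 - ewma_log - intvl_log) used above. The old (8 - ewma_log) is that formula with intvl_log fixed at 2, i.e. it was only correct for a one-second sampling interval. A small userspace model of the corrected update:

    /* Userspace sketch of the EWMA scaling fixed above, under the stated
     * assumptions: 4 MB arriving per 4 s interval should converge to an
     * estimate of ~1,000,000 bytes/sec.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int ewma_log = 3, intvl_log = 4;   /* 2^(4-2) = 4 s interval */
        uint64_t avbps = 0;                         /* bytes/sec, scaled by 2^8 */
        uint64_t delta_bytes = 4000000;             /* bytes seen this interval */

        for (int i = 0; i < 64; i++) {
            /* brate = (sample - avg) * 2^-ewma_log, everything scaled by 2^8 */
            uint64_t brate = delta_bytes << (10 - ewma_log - intvl_log);
            brate -= avbps >> ewma_log;
            avbps += brate;
        }
        printf("estimated rate: %llu bytes/sec\n",
               (unsigned long long)(avbps >> 8));   /* converges toward 1000000 */
        return 0;
    }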
@@ -916,7 +916,6 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
 		tcp_sk(child)->fastopen_rsk = NULL;
 	}
 	inet_csk_destroy_sock(child);
-	reqsk_put(req);
 }
 
 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
@@ -987,6 +986,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		sock_hold(child);
 
 		inet_child_forget(sk, req, child);
+		reqsk_put(req);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);
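These two hunks give the request-socket reference a single owner: inet_child_forget() no longer drops it, its callers do, which removes the path where both the helper and a caller put the same reference (the double put called out in the pull message). A toy model of the convention, with illustrative names only:

    /* Userspace sketch of the reference-ownership rule behind the hunks
     * above: a helper that drops the caller's reference invites a double
     * put when the caller also drops it. Moving the put to the caller
     * gives the reference exactly one owner.
     */
    #include <assert.h>
    #include <stdio.h>

    struct req { int refcnt; };

    static void req_put(struct req *r)
    {
        assert(r->refcnt > 0);        /* a double put would trip this */
        if (--r->refcnt == 0)
            printf("freed\n");
    }

    /* After the fix: the helper does teardown work only ... */
    static void child_forget(struct req *r)
    {
        (void)r;                      /* no req_put(r) here any more */
    }

    int main(void)
    {
        struct req r = { .refcnt = 1 };

        child_forget(&r);
        req_put(&r);                  /* ... and the sole owner puts it once */
        return 0;
    }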
@@ -1207,7 +1207,6 @@ e_inval:
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
 	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
-	bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
 	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
 		       ipv6_sk_rxinfo(sk);
 
@@ -1221,8 +1220,13 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 		 * (e.g., process binds socket to eth0 for Tx which is
 		 * redirected to loopback in the rtable/dst).
 		 */
-		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX || l3slave)
+		struct rtable *rt = skb_rtable(skb);
+		bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
+
+		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
 			pktinfo->ipi_ifindex = inet_iif(skb);
+		else if (l3slave && rt && rt->rt_iif)
+			pktinfo->ipi_ifindex = rt->rt_iif;
 
 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
 	} else {
@@ -176,7 +176,7 @@ skip_key_lookup:
 			return cand;
 
 	t = rcu_dereference(itn->collect_md_tun);
-	if (t)
+	if (t && t->dev->flags & IFF_UP)
 		return t;
 
 	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
@@ -991,6 +991,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	struct tcp_skb_cb *tcb;
 	struct tcp_out_options opts;
 	unsigned int tcp_options_size, tcp_header_size;
+	struct sk_buff *oskb = NULL;
 	struct tcp_md5sig_key *md5;
 	struct tcphdr *th;
 	int err;
@@ -998,12 +999,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
 	tp = tcp_sk(sk);
 
-	skb->skb_mstamp = tp->tcp_mstamp;
 	if (clone_it) {
 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
 			- tp->snd_una;
-		tcp_rate_skb_sent(sk, skb);
-
+		oskb = skb;
 		if (unlikely(skb_cloned(skb)))
 			skb = pskb_copy(skb, gfp_mask);
 		else
@@ -1011,6 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		if (unlikely(!skb))
 			return -ENOBUFS;
 	}
+	skb->skb_mstamp = tp->tcp_mstamp;
 
 	inet = inet_sk(sk);
 	tcb = TCP_SKB_CB(skb);
@@ -1122,12 +1122,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
 
-	if (likely(err <= 0))
-		return err;
-
-	tcp_enter_cwr(sk);
-
-	return net_xmit_eval(err);
+	if (unlikely(err > 0)) {
+		tcp_enter_cwr(sk);
+		err = net_xmit_eval(err);
+	}
+	if (!err && oskb) {
+		oskb->skb_mstamp = tp->tcp_mstamp;
+		tcp_rate_skb_sent(sk, oskb);
+	}
+	return err;
 }
 
 /* This routine just queues the buffer for sending.
@@ -2869,10 +2872,11 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		     skb_headroom(skb) >= 0xFFFF)) {
 		struct sk_buff *nskb;
 
-		skb->skb_mstamp = tp->tcp_mstamp;
 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
+		if (!err)
+			skb->skb_mstamp = tp->tcp_mstamp;
 	} else {
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
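Taken together, the tcp_transmit_skb()/__tcp_retransmit_skb() hunks stamp the buffer that is actually handed down the stack (the copy, on clone paths) and propagate the send time back to the retained original only when the transmit succeeded, so rate sampling sees the real departure time and a failed attempt leaves the old timestamp alone. A userspace sketch of that discipline, with hypothetical names (the kernel operates on sk_buff clones):

    /* Userspace sketch of the restamping rule in the hunks above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct buf { long mstamp; };

    static long now = 1000;                 /* fake clock */

    static int device_xmit(struct buf *b, bool fail)
    {
        (void)b;
        return fail ? -1 : 0;
    }

    static int transmit(struct buf *orig, bool clone_it, bool fail)
    {
        struct buf copy;
        struct buf *b = orig;

        if (clone_it)
            b = &copy;                      /* retransmits go out as a copy */
        b->mstamp = now;                    /* stamp what we actually send */

        if (device_xmit(b, fail))
            return -1;
        if (clone_it)
            orig->mstamp = now;             /* success: record real send time */
        return 0;
    }

    int main(void)
    {
        struct buf skb = { .mstamp = 0 };

        transmit(&skb, true, true);         /* failure leaves mstamp untouched */
        printf("after failure: %ld\n", skb.mstamp);   /* 0 */
        now = 2000;
        transmit(&skb, true, false);
        printf("after success: %ld\n", skb.mstamp);   /* 2000 */
        return 0;
    }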
@@ -171,7 +171,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	}
 
 	t = rcu_dereference(ip6n->collect_md_tun);
-	if (t)
+	if (t && t->dev->flags & IFF_UP)
 		return t;
 
 	t = rcu_dereference(ip6n->tnls_wc[0]);
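This is the same guard as the IPv4 tunnel hunk above: a collect_md tunnel is only matched while its device is administratively up, mirroring the existing fallback-device test. Note that & binds tighter than &&, so the unparenthesized flag test is correct as written. A compact sketch of the lookup rule, with illustrative types:

    /* Userspace sketch of the lookup guard added in both tunnel hunks:
     * skip a candidate whose device is down.
     */
    #include <stdio.h>

    #define IFF_UP 0x1

    struct net_device { unsigned int flags; };
    struct tunnel { struct net_device *dev; };

    static struct tunnel *lookup(struct tunnel *collect_md_tun,
                                 struct tunnel *fallback)
    {
        struct tunnel *t = collect_md_tun;

        if (t && t->dev->flags & IFF_UP)    /* '&' binds tighter than '&&' */
            return t;
        if (fallback && fallback->dev->flags & IFF_UP)
            return fallback;
        return NULL;
    }

    int main(void)
    {
        struct net_device down = { 0 }, up = { IFF_UP };
        struct tunnel md = { &down }, fb = { &up };

        printf("%s\n", lookup(&md, &fb) == &fb ? "fallback" : "md");
        return 0;
    }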
@@ -72,10 +72,6 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
 
 	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
 
-	/* make sure it's a Segment Routing header (Routing Type 4) */
-	if (srh->type != IPV6_SRCRT_TYPE_4)
-		return NULL;
-
 	len = (srh->hdrlen + 1) << 3;
 
 	if (!pskb_may_pull(skb, srhoff + len))
@@ -1112,7 +1112,8 @@ static int ovs_nla_init_match_and_action(struct net *net,
 		if (!a[OVS_FLOW_ATTR_KEY]) {
 			OVS_NLERR(log,
 				  "Flow key attribute not present in set flow.");
-			return -EINVAL;
+			error = -EINVAL;
+			goto error;
 		}
 
 		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
@@ -53,10 +53,13 @@ static void tcf_action_goto_chain_exec(const struct tc_action *a,
 	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }
 
-static void free_tcf(struct rcu_head *head)
+/* XXX: For standalone actions, we don't need a RCU grace period either, because
+ * actions are always connected to filters and filters are already destroyed in
+ * RCU callbacks, so after a RCU grace period actions are already disconnected
+ * from filters. Readers later can not find us.
+ */
+static void free_tcf(struct tc_action *p)
 {
-	struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu);
-
 	free_percpu(p->cpu_bstats);
 	free_percpu(p->cpu_qstats);
 
@@ -76,11 +79,7 @@ static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
 	spin_unlock_bh(&idrinfo->lock);
 	gen_kill_estimator(&p->tcfa_rate_est);
-	/*
-	 * gen_estimator est_timer() might access p->tcfa_lock
-	 * or bstats, wait a RCU grace period before freeing p
-	 */
-	call_rcu(&p->tcfa_rcu, free_tcf);
+	free_tcf(p);
 }
 
 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
@@ -181,7 +180,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	idr_for_each_entry_ext(idr, p, id) {
 		ret = __tcf_idr_release(p, false, true);
 		if (ret == ACT_P_DELETED) {
-			module_put(p->ops->owner);
+			module_put(ops->owner);
 			n_i++;
 		} else if (ret < 0) {
 			goto nla_put_failure;
@@ -259,7 +258,7 @@ void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
 {
 	if (est)
 		gen_kill_estimator(&a->tcfa_rate_est);
-	call_rcu(&a->tcfa_rcu, free_tcf);
+	free_tcf(a);
 }
 EXPORT_SYMBOL(tcf_idr_cleanup);
 
@@ -515,13 +514,15 @@ EXPORT_SYMBOL(tcf_action_exec);
 
 int tcf_action_destroy(struct list_head *actions, int bind)
 {
+	const struct tc_action_ops *ops;
 	struct tc_action *a, *tmp;
 	int ret = 0;
 
 	list_for_each_entry_safe(a, tmp, actions, list) {
+		ops = a->ops;
 		ret = __tcf_idr_release(a, bind, true);
 		if (ret == ACT_P_DELETED)
-			module_put(a->ops->owner);
+			module_put(ops->owner);
 		else if (ret < 0)
 			return ret;
 	}
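Two themes in these act_api.c hunks: free_tcf() no longer needs an RCU grace period (the added comment explains why), and both tcf_del_walker() and tcf_action_destroy() stop dereferencing a->ops after __tcf_idr_release() may have freed the action, caching the ops pointer first. A userspace sketch of that save-before-release pattern, with illustrative types rather than the kernel API:

    /* Anything needed after an object may be freed must be read out first,
     * or the later access is a use-after-free.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct ops { int *owner_refs; };
    struct action { const struct ops *ops; };

    static void module_put(int *refs) { (*refs)--; }

    static int release(struct action *a)
    {
        free(a);                /* after this, a->ops is a use-after-free */
        return 1;               /* "deleted" */
    }

    int main(void)
    {
        int refs = 1;
        struct ops ops = { &refs };
        struct action *a = malloc(sizeof(*a));

        a->ops = &ops;
        const struct ops *saved = a->ops;      /* cache before release ... */
        if (release(a) == 1)
            module_put(saved->owner_refs);     /* ... never a->ops here */
        printf("refs = %d\n", refs);
        return 0;
    }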
@@ -182,7 +182,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 	list_add_tail(&chain->list, &block->chain_list);
 	chain->block = block;
 	chain->index = chain_index;
-	chain->refcnt = 0;
+	chain->refcnt = 1;
 	return chain;
 }
 
@@ -194,21 +194,20 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 	RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
 	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
 		RCU_INIT_POINTER(chain->filter_chain, tp->next);
+		tcf_chain_put(chain);
 		tcf_proto_destroy(tp);
 	}
 }
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
-	/* May be already removed from the list by the previous call. */
-	if (!list_empty(&chain->list))
-		list_del_init(&chain->list);
+	list_del(&chain->list);
+	kfree(chain);
+}
 
-	/* There might still be a reference held when we got here from
-	 * tcf_block_put. Wait for the user to drop reference before free.
-	 */
-	if (!chain->refcnt)
-		kfree(chain);
+static void tcf_chain_hold(struct tcf_chain *chain)
+{
+	++chain->refcnt;
 }
 
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
@@ -217,24 +216,19 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 	struct tcf_chain *chain;
 
 	list_for_each_entry(chain, &block->chain_list, list) {
-		if (chain->index == chain_index)
-			goto incref;
+		if (chain->index == chain_index) {
+			tcf_chain_hold(chain);
+			return chain;
+		}
 	}
-	chain = create ? tcf_chain_create(block, chain_index) : NULL;
 
-incref:
-	if (chain)
-		chain->refcnt++;
-	return chain;
+	return create ? tcf_chain_create(block, chain_index) : NULL;
 }
 EXPORT_SYMBOL(tcf_chain_get);
 
 void tcf_chain_put(struct tcf_chain *chain)
 {
-	/* Destroy unused chain, with exception of chain 0, which is the
-	 * default one and has to be always present.
-	 */
-	if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0)
+	if (--chain->refcnt == 0)
 		tcf_chain_destroy(chain);
 }
 EXPORT_SYMBOL(tcf_chain_put);
@@ -279,10 +273,31 @@ void tcf_block_put(struct tcf_block *block)
 	if (!block)
 		return;
 
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+	/* XXX: Standalone actions are not allowed to jump to any chain, and
+	 * bound actions should be all removed after flushing. However,
+	 * filters are destroyed in RCU callbacks, we have to hold the chains
+	 * first, otherwise we would always race with RCU callbacks on this list
+	 * without proper locking.
+	 */
+
+	/* Wait for existing RCU callbacks to cool down. */
+	rcu_barrier();
+
+	/* Hold a refcnt for all chains, except 0, in case they are gone. */
+	list_for_each_entry(chain, &block->chain_list, list)
+		if (chain->index)
+			tcf_chain_hold(chain);
+
+	/* No race on the list, because no chain could be destroyed. */
+	list_for_each_entry(chain, &block->chain_list, list)
 		tcf_chain_flush(chain);
-		tcf_chain_destroy(chain);
-	}
+
+	/* Wait for RCU callbacks to release the reference count. */
+	rcu_barrier();
+
+	/* At this point, all the chains should have refcnt == 1. */
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+		tcf_chain_put(chain);
 	kfree(block);
 }
 EXPORT_SYMBOL(tcf_block_put);
@@ -360,6 +375,7 @@ static void tcf_chain_tp_insert(struct tcf_chain *chain,
 		rcu_assign_pointer(*chain->p_filter_chain, tp);
 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
 	rcu_assign_pointer(*chain_info->pprev, tp);
+	tcf_chain_hold(chain);
 }
 
 static void tcf_chain_tp_remove(struct tcf_chain *chain,
@@ -371,6 +387,7 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain,
 	if (chain->p_filter_chain && tp == chain->filter_chain)
 		RCU_INIT_POINTER(*chain->p_filter_chain, next);
 	RCU_INIT_POINTER(*chain_info->pprev, next);
+	tcf_chain_put(chain);
 }
 
 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
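The refcounting scheme these cls_api.c hunks install: tcf_chain_create() hands back a chain with refcnt 1 (the creator's reference), tcf_chain_get() and every attached filter take further references via tcf_chain_hold(), and tcf_chain_put() destroys the chain exactly when the count reaches zero; tcf_block_put() then uses two rcu_barrier() calls to serialize with filters freed from RCU callbacks. A minimal userspace sketch of the lifetime rules, not the kernel API:

    /* Creation returns the object with refcount 1; lookups and attached
     * filters take additional references; the last put frees it.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct chain { unsigned int refcnt; };

    static struct chain *chain_create(void)
    {
        struct chain *c = calloc(1, sizeof(*c));
        c->refcnt = 1;                  /* creator's reference */
        return c;
    }

    static void chain_hold(struct chain *c) { ++c->refcnt; }

    static void chain_put(struct chain *c)
    {
        if (--c->refcnt == 0) {         /* unambiguous destruction point */
            free(c);
            printf("destroyed\n");
        }
    }

    int main(void)
    {
        struct chain *c = chain_create();

        chain_hold(c);   /* e.g. a filter attached (tcf_chain_tp_insert) */
        chain_put(c);    /* filter removed (tcf_chain_tp_remove) */
        chain_put(c);    /* creator's reference: refcnt hits 0, freed */
        return 0;
    }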
@@ -389,7 +389,7 @@ static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
 		if ((data->hgenerator += 0x10000) == 0)
 			data->hgenerator = 0x10000;
 		h = data->hgenerator|salt;
-		if (rsvp_get(tp, h) == 0)
+		if (!rsvp_get(tp, h))
 			return h;
 	}
 	return 0;
@@ -279,9 +279,11 @@ out:
 	return err;
 }
 
-static int sctp_sock_dump(struct sock *sk, void *p)
+static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 {
+	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
+	struct sock *sk = ep->base.sk;
 	struct sk_buff *skb = commp->skb;
 	struct netlink_callback *cb = commp->cb;
 	const struct inet_diag_req_v2 *r = commp->r;
@@ -289,9 +291,7 @@ static int sctp_sock_dump(struct sock *sk, void *p)
 	int err = 0;
 
 	lock_sock(sk);
-	if (!sctp_sk(sk)->ep)
-		goto release;
-	list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
+	list_for_each_entry(assoc, &ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
 
@@ -309,7 +309,6 @@ static int sctp_sock_dump(struct sock *sk, void *p)
 					cb->nlh->nlmsg_seq,
 					NLM_F_MULTI, cb->nlh,
 					commp->net_admin) < 0) {
-			cb->args[3] = 1;
 			err = 1;
 			goto release;
 		}
@@ -327,40 +326,30 @@ next:
 		cb->args[4]++;
 	}
 	cb->args[1] = 0;
-	cb->args[2]++;
 	cb->args[3] = 0;
 	cb->args[4] = 0;
 release:
 	release_sock(sk);
-	sock_put(sk);
 	return err;
 }
 
-static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
 {
 	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
-	struct netlink_callback *cb = commp->cb;
 	const struct inet_diag_req_v2 *r = commp->r;
 	struct sctp_association *assoc =
 		list_entry(ep->asocs.next, struct sctp_association, asocs);
 
 	/* find the ep only once through the transports by this condition */
 	if (tsp->asoc != assoc)
-		goto out;
+		return 0;
 
 	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-		goto out;
-
-	sock_hold(sk);
-	cb->args[5] = (long)sk;
-
-	return 1;
+		return 0;
 
-out:
-	cb->args[2]++;
-	return 0;
+	return 1;
 }
 
 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
@@ -503,12 +492,8 @@ skip:
 	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
 
-next:
-	cb->args[5] = 0;
-	sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
-
-	if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
-		goto next;
+	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
+				net, (int *)&cb->args[2], &commp);
 
 done:
 	cb->args[1] = cb->args[4];
@@ -4658,29 +4658,39 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
 
 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    struct net *net, int pos, void *p) {
+			    int (*cb_done)(struct sctp_transport *, void *),
+			    struct net *net, int *pos, void *p) {
 	struct rhashtable_iter hti;
-	void *obj;
-	int err;
+	struct sctp_transport *tsp;
+	int ret;
 
-	err = sctp_transport_walk_start(&hti);
-	if (err)
-		return err;
+again:
+	ret = sctp_transport_walk_start(&hti);
+	if (ret)
+		return ret;
 
-	obj = sctp_transport_get_idx(net, &hti, pos + 1);
-	for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
-		struct sctp_transport *transport = obj;
-
-		if (!sctp_transport_hold(transport))
+	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+		if (!sctp_transport_hold(tsp))
 			continue;
-		err = cb(transport, p);
-		sctp_transport_put(transport);
-		if (err)
+		ret = cb(tsp, p);
+		if (ret)
 			break;
+		(*pos)++;
+		sctp_transport_put(tsp);
 	}
 	sctp_transport_walk_stop(&hti);
 
-	return err;
+	if (ret) {
+		if (cb_done && !cb_done(tsp, p)) {
+			(*pos)++;
+			sctp_transport_put(tsp);
+			goto again;
+		}
+		sctp_transport_put(tsp);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(sctp_for_each_transport);
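The new signature threads a caller-owned cursor through the walk: *pos is advanced only past fully processed transports, so when a callback stops the walk (for instance because the netlink buffer filled up) the iteration can be restarted from the same element. A userspace model of that resumable-cursor pattern, under the stated simplifications:

    /* The position advances only after an element is fully processed,
     * so the walk can stop, be re-entered, and continue where it left
     * off without holding references across calls.
     */
    #include <stdio.h>

    static int process(int v, int budget_left)
    {
        if (!budget_left)
            return 1;               /* stop: out of buffer space */
        printf("processed %d\n", v);
        return 0;
    }

    static int walk(const int *items, int n, int *pos, int budget)
    {
        for (int i = *pos; i < n; i++) {
            if (process(items[i], budget--))
                return 1;           /* *pos still names the unfinished item */
            (*pos)++;               /* advance only after success */
        }
        return 0;
    }

    int main(void)
    {
        int items[] = { 10, 20, 30, 40, 50 };
        int pos = 0;

        while (walk(items, 5, &pos, 2))   /* two items per "dump" pass */
            printf("-- resume at index %d --\n", pos);
        return 0;
    }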
@@ -639,7 +639,7 @@ sendpage_end:
 	return ret;
 }
 
-void tls_sw_free_resources(struct sock *sk)
+static void tls_sw_free_resources(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);