Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here... For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved. In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed. In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here. The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code. The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes: ==================== Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based. Conflicts: drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f95
(IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e
(IB/mlx5: Add proper representors support) added as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch. Updates: drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
|
||||
return res;
|
||||
out:
|
||||
if (res == ACT_P_CREATED)
|
||||
tcf_idr_cleanup(*act, est);
|
||||
tcf_idr_release(*act, bind);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
|
||||
{
|
||||
struct sctphdr *sctph;
|
||||
|
||||
if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
|
||||
if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
|
||||
return 1;
|
||||
|
||||
sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
|
||||
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a)
|
||||
struct tcf_csum_params *params;
|
||||
|
||||
params = rcu_dereference_protected(p->params, 1);
|
||||
kfree_rcu(params, rcu);
|
||||
if (params)
|
||||
kfree_rcu(params, rcu);
|
||||
}
|
||||
|
||||
static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
|
||||
|
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
|
||||
static void tcf_ipt_release(struct tc_action *a)
|
||||
{
|
||||
struct tcf_ipt *ipt = to_ipt(a);
|
||||
ipt_destroy_target(ipt->tcfi_t);
|
||||
|
||||
if (ipt->tcfi_t) {
|
||||
ipt_destroy_target(ipt->tcfi_t);
|
||||
kfree(ipt->tcfi_t);
|
||||
}
|
||||
kfree(ipt->tcfi_tname);
|
||||
kfree(ipt->tcfi_t);
|
||||
}
|
||||
|
||||
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
|
||||
@@ -187,7 +190,7 @@ err2:
|
||||
kfree(tname);
|
||||
err1:
|
||||
if (ret == ACT_P_CREATED)
|
||||
tcf_idr_cleanup(*a, est);
|
||||
tcf_idr_release(*a, bind);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
|
||||
p = to_pedit(*a);
|
||||
keys = kmalloc(ksize, GFP_KERNEL);
|
||||
if (keys == NULL) {
|
||||
tcf_idr_cleanup(*a, est);
|
||||
tcf_idr_release(*a, bind);
|
||||
kfree(keys_ex);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@@ -196,7 +196,7 @@ failure:
|
||||
qdisc_put_rtab(P_tab);
|
||||
qdisc_put_rtab(R_tab);
|
||||
if (ret == ACT_P_CREATED)
|
||||
tcf_idr_cleanup(*a, est);
|
||||
tcf_idr_release(*a, bind);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
|
||||
|
||||
psample_group = rtnl_dereference(s->psample_group);
|
||||
RCU_INIT_POINTER(s->psample_group, NULL);
|
||||
psample_group_put(psample_group);
|
||||
if (psample_group)
|
||||
psample_group_put(psample_group);
|
||||
}
|
||||
|
||||
static bool tcf_sample_dev_ok_push(struct net_device *dev)
|
||||
|
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
|
||||
d = to_defact(*a);
|
||||
ret = alloc_defdata(d, defdata);
|
||||
if (ret < 0) {
|
||||
tcf_idr_cleanup(*a, est);
|
||||
tcf_idr_release(*a, bind);
|
||||
return ret;
|
||||
}
|
||||
d->tcf_action = parm->action;
|
||||
|
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
|
||||
ASSERT_RTNL();
|
||||
p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
|
||||
if (unlikely(!p)) {
|
||||
if (ovr)
|
||||
if (ret == ACT_P_CREATED)
|
||||
tcf_idr_release(*a, bind);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a)
|
||||
struct tcf_skbmod_params *p;
|
||||
|
||||
p = rcu_dereference_protected(d->skbmod_p, 1);
|
||||
kfree_rcu(p, rcu);
|
||||
if (p)
|
||||
kfree_rcu(p, rcu);
|
||||
}
|
||||
|
||||
static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
|
||||
|
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
|
||||
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a)
|
||||
struct tcf_tunnel_key_params *params;
|
||||
|
||||
params = rcu_dereference_protected(t->params, 1);
|
||||
if (params) {
|
||||
if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
||||
dst_release(&params->tcft_enc_metadata->dst);
|
||||
|
||||
if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
|
||||
dst_release(&params->tcft_enc_metadata->dst);
|
||||
|
||||
kfree_rcu(params, rcu);
|
||||
kfree_rcu(params, rcu);
|
||||
}
|
||||
}
|
||||
|
||||
static int tunnel_key_dump_addresses(struct sk_buff *skb,
|
||||
|
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
|
||||
ASSERT_RTNL();
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p) {
|
||||
if (ovr)
|
||||
if (ret == ACT_P_CREATED)
|
||||
tcf_idr_release(*a, bind);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a)
|
||||
struct tcf_vlan_params *p;
|
||||
|
||||
p = rcu_dereference_protected(v->vlan_p, 1);
|
||||
kfree_rcu(p, rcu);
|
||||
if (p)
|
||||
kfree_rcu(p, rcu);
|
||||
}
|
||||
|
||||
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
|
||||
|
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
|
||||
|
||||
__skb_queue_tail(&q->skb_bad_txq, skb);
|
||||
|
||||
if (qdisc_is_percpu_stats(q)) {
|
||||
qdisc_qstats_cpu_backlog_inc(q, skb);
|
||||
qdisc_qstats_cpu_qlen_inc(q);
|
||||
} else {
|
||||
qdisc_qstats_backlog_inc(q, skb);
|
||||
q->q.qlen++;
|
||||
}
|
||||
|
||||
if (lock)
|
||||
spin_unlock(lock);
|
||||
}
|
||||
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
|
||||
break;
|
||||
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
|
||||
qdisc_enqueue_skb_bad_txq(q, nskb);
|
||||
|
||||
if (qdisc_is_percpu_stats(q)) {
|
||||
qdisc_qstats_cpu_backlog_inc(q, nskb);
|
||||
qdisc_qstats_cpu_qlen_inc(q);
|
||||
} else {
|
||||
qdisc_qstats_backlog_inc(q, nskb);
|
||||
q->q.qlen++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
skb->next = nskb;
|
||||
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
|
||||
int band = prio2band[skb->priority & TC_PRIO_MAX];
|
||||
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
|
||||
struct skb_array *q = band2list(priv, band);
|
||||
unsigned int pkt_len = qdisc_pkt_len(skb);
|
||||
int err;
|
||||
|
||||
err = skb_array_produce(q, skb);
|
||||
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
|
||||
return qdisc_drop_cpu(skb, qdisc, to_free);
|
||||
|
||||
qdisc_qstats_cpu_qlen_inc(qdisc);
|
||||
qdisc_qstats_cpu_backlog_inc(qdisc, skb);
|
||||
/* Note: skb can not be used after skb_array_produce(),
|
||||
* so we better not use qdisc_qstats_cpu_backlog_inc()
|
||||
*/
|
||||
this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
|
||||
return NET_XMIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
}
|
||||
|
||||
if (unlikely(sch->q.qlen >= sch->limit))
|
||||
return qdisc_drop(skb, sch, to_free);
|
||||
return qdisc_drop_all(skb, sch, to_free);
|
||||
|
||||
qdisc_qstats_backlog_inc(sch, skb);
|
||||
|
||||
|
Reference in New Issue
Block a user