Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Completely minor snmp doc conflict.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
 	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
 };
 
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+	if (!p)
+		return;
+	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+		dst_release(&p->tcft_enc_metadata->dst);
+	kfree_rcu(p, rcu);
+}
+
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
 			   struct nlattr *est, struct tc_action **a,
 			   int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	rcu_swap_protected(t->params, params_new,
 			   lockdep_is_held(&t->tcf_lock));
 	spin_unlock_bh(&t->tcf_lock);
-	if (params_new)
-		kfree_rcu(params_new, rcu);
+	tunnel_key_release_params(params_new);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
 	struct tcf_tunnel_key_params *params;
 
 	params = rcu_dereference_protected(t->params, 1);
-	if (params) {
-		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-			dst_release(&params->tcft_enc_metadata->dst);
-
-		kfree_rcu(params, rcu);
-	}
+	tunnel_key_release_params(params);
 }
 
 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode)
 {
-	__be16 protocol = tc_skb_protocol(skb);
 #ifdef CONFIG_NET_CLS_ACT
 	const int max_reclassify_loop = 4;
 	const struct tcf_proto *orig_tp = tp;
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 reclassify:
 #endif
 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
+		__be16 protocol = tc_skb_protocol(skb);
 		int err;
 
 		if (tp->protocol != protocol &&
@@ -1319,7 +1319,6 @@ reset:
 	}
 
 	tp = first_tp;
-	protocol = tc_skb_protocol(skb);
 	goto reclassify;
 #endif
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *fold = *arg;
 	struct cls_fl_filter *fnew;
+	struct fl_flow_mask *mask;
 	struct nlattr **tb;
-	struct fl_flow_mask mask = {};
 	int err;
 
 	if (!tca[TCA_OPTIONS])
 		return -EINVAL;
 
-	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
-	if (!tb)
+	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+	if (!mask)
 		return -ENOBUFS;
 
+	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+	if (!tb) {
+		err = -ENOBUFS;
+		goto errout_mask_alloc;
+	}
+
 	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
 			       fl_policy, NULL);
 	if (err < 0)
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
-	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
 			   tp->chain->tmplt_priv, extack);
 	if (err)
 		goto errout_idr;
 
-	err = fl_check_assign_mask(head, fnew, fold, &mask);
+	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
 		goto errout_idr;
 
@@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	}
 
 	kfree(tb);
+	kfree(mask);
 	return 0;
 
 errout_mask:
@@ -1405,6 +1412,8 @@ errout:
 	kfree(fnew);
 errout_tb:
 	kfree(tb);
+errout_mask_alloc:
+	kfree(mask);
 	return err;
 }
 
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
 		struct sk_buff *segs, *nskb;
 		netdev_features_t features = netif_skb_features(skb);
-		unsigned int slen = 0;
+		unsigned int slen = 0, numsegs = 0;
 
 		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 		if (IS_ERR_OR_NULL(segs))
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			flow_queue_add(flow, segs);
 
 			sch->q.qlen++;
+			numsegs++;
 			slen += segs->len;
 			q->buffer_used += segs->truesize;
 			b->packets++;
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		sch->qstats.backlog += slen;
 		q->avg_window_bytes += slen;
 
-		qdisc_tree_reduce_backlog(sch, 1, len);
+		qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
 		consume_skb(skb);
 	} else {
 		/* not splitting */
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			     struct Qdisc *child,
 			     struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	int err;
 
 	err = child->ops->enqueue(skb, child, to_free);
 	if (err != NET_XMIT_SUCCESS)
 		return err;
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
 	int err = 0;
+	bool first;
 
 	cl = drr_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	if (cl->qdisc->q.qlen == 1) {
+	if (first) {
 		list_add_tail(&cl->alist, &q->active);
 		cl->deficit = cl->quantum;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return err;
 }
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			  struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	int err;
 
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct hfsc_class *cl;
 	int uninitialized_var(err);
+	bool first;
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
-	if (cl->qdisc->q.qlen == 1) {
-		unsigned int len = qdisc_pkt_len(skb);
-
+	if (first) {
 		if (cl->cl_flags & HFSC_RSC)
 			init_ed(cl, len);
 		if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff **to_free)
 {
 	int uninitialized_var(ret);
+	unsigned int len = qdisc_pkt_len(skb);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb, sch, &ret);
 
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		htb_activate(q, cl);
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 static int
 prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct Qdisc *qdisc;
 	int ret;
 
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
 	ret = qdisc_enqueue(skb, qdisc, to_free);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb), gso_segs;
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
 	struct qfq_aggregate *agg;
 	int err = 0;
+	bool first;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+	if (unlikely(cl->agg->lmax < len)) {
 		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
-		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
-				     qdisc_pkt_len(skb));
+			 cl->agg->lmax, len, cl->common.classid);
+		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
 		if (err) {
 			cl->qstats.drops++;
 			return qdisc_drop(skb, sch, to_free);
 		}
 	}
 
+	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	bstats_update(&cl->bstats, skb);
-	qdisc_qstats_backlog_inc(sch, skb);
+	cl->bstats.bytes += len;
+	cl->bstats.packets += gso_segs;
+	sch->qstats.backlog += len;
 	++sch->q.qlen;
 
 	agg = cl->agg;
 	/* if the queue was not empty, then done here */
-	if (cl->qdisc->q.qlen != 1) {
+	if (!first) {
 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
 		    list_first_entry(&agg->active, struct qfq_class, alist)
-		    == cl && cl->deficit < qdisc_pkt_len(skb))
+		    == cl && cl->deficit < len)
 			list_move_tail(&cl->alist, &agg->active);
 
 		return err;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			struct sk_buff **to_free)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
+	unsigned int len = qdisc_pkt_len(skb);
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return ret;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }