Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/sched/act_police.c
	net/sched/sch_drr.c
	net/sched/sch_hfsc.c
	net/sched/sch_prio.c
	net/sched/sch_red.c
	net/sched/sch_tbf.c

In net-next the drop methods of the packet schedulers got removed, so
the bug fixes to them in 'net' are irrelevant.

A packet action unload crash fix conflicts with the addition of the
new firstuse timestamp.

Signed-off-by: David S. Miller <davem@davemloft.net>
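The scheduler fixes below all restore the same invariant: any path that
changes a qdisc's packet count (sch->q.qlen) must change its byte count
(sch->qstats.backlog) in lockstep, otherwise qdisc_tree_reduce_backlog()
propagates stale byte totals up the qdisc hierarchy. A minimal sketch of
the pattern, assuming a simplified qdisc with a single child; the toy_*
names are hypothetical, only the accounting helpers are the ones used in
the diffs below:

	/* Hypothetical single-child qdisc: enqueue, dequeue and reset
	 * keep qlen (packets) and backlog (bytes) moving together.
	 */
	struct toy_sched_data {
		struct Qdisc *child;
	};

	static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		struct toy_sched_data *q = qdisc_priv(sch);
		int ret = qdisc_enqueue(skb, q->child);

		if (ret == NET_XMIT_SUCCESS) {
			qdisc_qstats_backlog_inc(sch, skb);	/* bytes */
			sch->q.qlen++;				/* packets */
		}
		return ret;
	}

	static struct sk_buff *toy_dequeue(struct Qdisc *sch)
	{
		struct toy_sched_data *q = qdisc_priv(sch);
		struct sk_buff *skb = q->child->dequeue(q->child);

		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
		return skb;
	}

	static void toy_reset(struct Qdisc *sch)
	{
		struct toy_sched_data *q = qdisc_priv(sch);

		qdisc_reset(q->child);
		sch->qstats.backlog = 0;	/* zeroed together with qlen */
		sch->q.qlen = 0;
	}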
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -38,7 +38,7 @@ struct tcf_police {
 	bool			peak_present;
 };
 #define to_police(pc)	\
-	container_of(pc, struct tcf_police, common)
+	container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 				 struct nlattr *est, struct tc_action *a,
 				 int ovr, int bind)
 {
-	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
-	struct tcf_hashinfo *hinfo = tn->hinfo;
 	int size;
 
 	if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
 	if (parm->index) {
 		if (tcf_hash_search(tn, a, parm->index)) {
-			police = to_police(a->priv);
+			police = to_police(a);
 			if (bind) {
 				police->tcf_bindcnt += 1;
 				police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 			/* not replacing */
 			return -EEXIST;
 		}
+	} else {
+		ret = tcf_hash_create(tn, parm->index, NULL, a,
+				      sizeof(*police), bind, false);
+		if (ret)
+			return ret;
+		ret = ACT_P_CREATED;
 	}
 
-	police = kzalloc(sizeof(*police), GFP_KERNEL);
-	if (police == NULL)
-		return -ENOMEM;
-	ret = ACT_P_CREATED;
-	police->tcf_refcnt = 1;
-	spin_lock_init(&police->tcf_lock);
-	if (bind)
-		police->tcf_bindcnt = 1;
+	police = to_police(a);
 override:
 	if (parm->rate.rate) {
 		err = -ENOMEM;
@@ -238,17 +235,8 @@ override:
 		return ret;
 
 	police->tcfp_t_c = ktime_get_ns();
-	police->tcf_index = parm->index ? parm->index :
-		tcf_hash_new_index(tn);
-	police->tcf_tm.install = jiffies;
-	police->tcf_tm.lastuse = jiffies;
-	police->tcf_tm.firstuse = 0;
-	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-	spin_lock_bh(&hinfo->lock);
-	hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-	spin_unlock_bh(&hinfo->lock);
+	tcf_hash_insert(tn, a);
 
-	a->priv = police;
 	return ret;
 
 failure_unlock:
@@ -257,7 +245,7 @@ failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
 	if (ret == ACT_P_CREATED)
-		kfree(police);
+		tcf_hash_cleanup(a, est);
 	return err;
 }
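The act_police.c hunks above are the conflict called out at the top: the
'net' unload-crash fix deletes the open-coded kzalloc()/hash-insertion
path, while net-next had just extended that same block with the new
tcf_tm.firstuse timestamp. The resolution keeps the fix: the instance is
allocated through tcf_hash_create() and published with tcf_hash_insert(),
and the manual index/timestamp setup drops out (on the net-next side the
hash helpers initialize install/lastuse/firstuse themselves). A
simplified view of the object layout behind the reworked to_police(),
with abbreviated declarations rather than the full kernel definitions:

	/* tcf_hash_create() allocates the whole tcf_police and stores a
	 * pointer to the embedded common part in the action's priv
	 * field; container_of() recovers the outer structure from it.
	 */
	struct tcf_police {
		struct tcf_common common;  /* index, refcnt, bindcnt, tm */
		/* policer state: rate, peak, burst, ... */
	};

	police = to_police(a);	/* container_of(a->priv, struct tcf_police, common) */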
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -175,7 +175,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_DESTROY;
@@ -198,7 +198,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return;
 
 	offload.command = TC_CLSFLOWER_REPLACE;
@@ -220,7 +220,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_STATS;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
 		offload.cls_u32->knode.handle = handle;
 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
 	struct tc_to_netdev offload;
 	int err;
 
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
+
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-		offload.cls_u32->hnode.divisor = h->divisor;
-		offload.cls_u32->hnode.handle = h->handle;
-		offload.cls_u32->hnode.prio = h->prio;
+	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+	offload.cls_u32->hnode.divisor = h->divisor;
+	offload.cls_u32->hnode.handle = h->handle;
+	offload.cls_u32->hnode.prio = h->prio;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
 		offload.cls_u32->hnode.divisor = h->divisor;
 		offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
-		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-		offload.cls_u32->knode.handle = n->handle;
-		offload.cls_u32->knode.fshift = n->fshift;
-#ifdef CONFIG_CLS_U32_MARK
-		offload.cls_u32->knode.val = n->val;
-		offload.cls_u32->knode.mask = n->mask;
-#else
-		offload.cls_u32->knode.val = 0;
-		offload.cls_u32->knode.mask = 0;
-#endif
-		offload.cls_u32->knode.sel = &n->sel;
-		offload.cls_u32->knode.exts = &n->exts;
-		if (n->ht_down)
-			offload.cls_u32->knode.link_handle = n->ht_down->handle;
+	if (!tc_should_offload(dev, tp, flags))
+		return tc_skip_sw(flags) ? -EINVAL : 0;
 
-		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-						    tp->protocol, &offload);
-		if (tc_skip_sw(flags))
-			return err;
-	}
+	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+	offload.cls_u32->knode.handle = n->handle;
+	offload.cls_u32->knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+	offload.cls_u32->knode.val = n->val;
+	offload.cls_u32->knode.mask = n->mask;
+#else
+	offload.cls_u32->knode.val = 0;
+	offload.cls_u32->knode.mask = 0;
+#endif
+	offload.cls_u32->knode.sel = &n->sel;
+	offload.cls_u32->knode.exts = &n->exts;
+	if (n->ht_down)
+		offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+					    tp->protocol, &offload);
+	if (tc_skip_sw(flags))
+		return err;
 
 	return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 		if (!tc_flags_valid(flags))
-			return err;
+			return -EINVAL;
 	}
 
 	n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		ht->divisor = divisor;
 		ht->handle = handle;
 		ht->prio = tp->prio;
+
+		err = u32_replace_hw_hnode(tp, ht, flags);
+		if (err) {
+			kfree(ht);
+			return err;
+		}
+
 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 		rcu_assign_pointer(tp_c->hlist, ht);
 		*arg = (unsigned long)ht;
 
-		u32_replace_hw_hnode(tp, ht, flags);
 		return 0;
 	}
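In the cls_flower.c and cls_u32.c hunks above, tc_should_offload() gains
the tcf_proto argument so it can also ask the qdisc whether the class the
filter is attached to is offloadable at all (see the new tcf_cl_offload
callbacks in sch_ingress.c further down). The early return added to the
u32 paths also encodes the skip_sw policy; an annotated reading of that
line (the comments are added here, this is not a kernel excerpt):

	/* No hardware path (device can't offload, or the class isn't
	 * offloadable):
	 *   - with skip_sw the filter has nowhere left to run -> -EINVAL
	 *   - otherwise fall back to software silently        -> 0
	 * With a hardware path: program the device, and tc_skip_sw()
	 * decides whether a hardware error is fatal.
	 */
	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;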
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -378,6 +378,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return err;
 }
@@ -410,6 +411,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
 			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -446,6 +448,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -190,6 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	unsigned int pkt_len;
 	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
@@ -221,6 +222,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	prev_backlog = sch->qstats.backlog;
 	prev_qlen = sch->q.qlen;
 
+	/* save this packet length as it might be dropped by fq_codel_drop() */
+	pkt_len = qdisc_pkt_len(skb);
 	/* fq_codel_drop() is quite expensive, as it performs a linear search
 	 * in q->backlogs[] to find a fat flow.
 	 * So instead of dropping a single packet, drop half of its backlog
@@ -228,14 +231,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 */
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
-	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	prev_qlen -= sch->q.qlen;
+	prev_backlog -= sch->qstats.backlog;
+	q->drop_overlimit += prev_qlen;
 	if (memory_limited)
-		q->drop_overmemory += prev_qlen - sch->q.qlen;
-	/* As we dropped packet(s), better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-				  prev_backlog - sch->qstats.backlog);
+		q->drop_overmemory += prev_qlen;
 
-	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+	/* As we dropped packet(s), better let upper stack know this.
+	 * If we dropped a packet for this flow, return NET_XMIT_CN,
+	 * but in this case, our parents wont increase their backlogs.
+	 */
+	if (ret == idx) {
+		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+					  prev_backlog - pkt_len);
+		return NET_XMIT_CN;
+	}
+	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+	return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -647,7 +659,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		qs.backlog = q->backlogs[idx];
 		qs.drops = flow->dropped;
 	}
-	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
 	if (idx < q->flows_cnt)
 		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
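The fq_codel_enqueue() rework above turns prev_qlen/prev_backlog into
drop deltas and special-cases the flow that was just enqueued on: when
fq_codel_drop() picked that flow (ret == idx), the new skb itself was
among the victims, and since the parent qdiscs never accounted for it,
they must be told about one packet and pkt_len bytes less while the
caller still gets NET_XMIT_CN. A worked reading of the hunk, with
made-up numbers:

	/* Before the batch drop: qlen 100, backlog 150000 bytes.
	 * fq_codel_drop() removed 10 packets / 15000 bytes, one of them
	 * the 1500-byte skb just enqueued (so ret == idx).
	 */
	prev_qlen -= sch->q.qlen;		/* 100 - 90 = 10 dropped */
	prev_backlog -= sch->qstats.backlog;	/* 150000 - 135000 = 15000 */

	if (ret == idx) {
		/* parents saw neither the enqueue nor the drop of skb */
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,	    /* 9 */
					  prev_backlog - pkt_len);  /* 13500 */
		return NET_XMIT_CN;
	}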
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	qdisc_qstats_backlog_inc(q, skb);
 	q->q.qlen++;	/* it's still part of the queue */
 	__netif_schedule(q);
 
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
+			qdisc_qstats_backlog_dec(q, skb);
 			q->q.qlen--;
 		} else
 			skb = NULL;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	q->eligible = RB_ROOT;
 	INIT_LIST_HEAD(&q->droplist);
 	qdisc_watchdog_cancel(&q->watchdog);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	struct hfsc_sched *q = qdisc_priv(sch);
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
-	struct hfsc_class *cl;
-	unsigned int i;
-
-	sch->qstats.backlog = 0;
-	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-			sch->qstats.backlog += cl->qdisc->qstats.backlog;
-	}
 
 	qopt.defcls = q->defcls;
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
 
 	qdisc_unthrottled(sch);
 	qdisc_bstats_update(sch, skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
 	return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+	return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
 					 unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	ingress_find_tcf,
+	.tcf_cl_offload	=	ingress_cl_offload,
 	.bind_tcf	=	ingress_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
 	}
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
 					unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	clsact_find_tcf,
+	.tcf_cl_offload	=	clsact_cl_offload,
 	.bind_tcf	=	clsact_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };
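The tcf_cl_offload callbacks above are what the new tp argument to
tc_should_offload() consults: ingress offloads filters bound to any of
its classes, while clsact offloads only those on its ingress class,
since its egress hook has no hardware equivalent. A rough sketch of the
combined check; toy_should_offload() only approximates the shape of the
real helper in include/net/pkt_cls.h:

	static inline bool toy_should_offload(struct net_device *dev,
					      struct tcf_proto *tp, u32 flags)
	{
		const struct Qdisc_class_ops *cops = tp->q->ops->cl_ops;

		if (!(dev->features & NETIF_F_HW_TC))	/* no TC offload */
			return false;
		if (flags & TCA_CLS_FLAGS_SKIP_HW)	/* user opted out */
			return false;
		if (cops && cops->tcf_cl_offload)	/* ask the qdisc */
			return cops->tcf_cl_offload(tp->classid);
		return true;
	}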
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
 		if (skb) {
 			qdisc_bstats_update(sch, skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -133,6 +135,7 @@ prio_reset(struct Qdisc *sch)
 
 	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1238,8 +1238,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			    cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
 		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
 				     qdisc_pkt_len(skb));
-		if (err)
-			return err;
+		if (err) {
+			cl->qstats.drops++;
+			return qdisc_drop(skb, sch);
+		}
 	}
 
 	err = qdisc_enqueue(skb, cl->qdisc);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
 	skb = child->dequeue(child);
 	if (skb) {
 		qdisc_bstats_update(sch, skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
 		if (!red_is_idling(&q->vars))
@@ -139,6 +141,7 @@ static void red_reset(struct Qdisc *sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
+	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
@@ -251,6 +252,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 			q->t_c = now;
 			q->tokens = toks;
 			q->ptokens = ptoks;
+			qdisc_qstats_backlog_dec(sch, skb);
 			sch->q.qlen--;
 			qdisc_unthrottled(sch);
 			qdisc_bstats_update(sch, skb);
@@ -282,6 +284,7 @@ static void tbf_reset(struct Qdisc *sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	q->t_c = ktime_get_ns();
 	q->tokens = q->buffer;