- // SPDX-License-Identifier: GPL-2.0-or-later
- /*
- * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
- *
- * Authors: Martin Devera, <[email protected]>
- *
- * Credits (in time order) for older HTB versions:
- * Stef Coene <[email protected]>
- * HTB support at LARTC mailing list
- * Ondrej Kraus, <[email protected]>
- * found missing INIT_QDISC(htb)
- * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
- * helped a lot to locate nasty class stall bug
- * Andi Kleen, Jamal Hadi, Bert Hubert
- * code review and helpful comments on shaping
- * Tomasz Wrona, <[email protected]>
- * created test case so that I was able to fix nasty bug
- * Wilfried Weissmann
- * spotted bug in dequeue code and helped with fix
- * Jiri Fojtasek
- * fixed requeue routine
- * and many others. thanks.
- */
- #include <linux/module.h>
- #include <linux/moduleparam.h>
- #include <linux/types.h>
- #include <linux/kernel.h>
- #include <linux/string.h>
- #include <linux/errno.h>
- #include <linux/skbuff.h>
- #include <linux/list.h>
- #include <linux/compiler.h>
- #include <linux/rbtree.h>
- #include <linux/workqueue.h>
- #include <linux/slab.h>
- #include <net/netlink.h>
- #include <net/sch_generic.h>
- #include <net/pkt_sched.h>
- #include <net/pkt_cls.h>
- /* HTB algorithm.
- Author: [email protected]
- ========================================================================
- HTB is like TBF with multiple classes. It is also similar to CBQ because
- it allows assigning a priority to each class in the hierarchy.
- In fact it is another implementation of Floyd's formal sharing.
- Levels:
- Each class is assigned a level. Leaves are ALWAYS level 0 and root
- classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
- one less than their parent.
- */
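- /* For illustration (not from the original source): with TC_HTB_MAXDEPTH == 8,
- * a three-deep hierarchy built from the hypothetical classids below is
- * labelled as follows:
- *
- *	1:1   root class  -> level 7 (TC_HTB_MAXDEPTH - 1)
- *	1:10  inner class -> level 6 (parent's level - 1)
- *	1:100 leaf class  -> level 0 (always)
- *
- * Interior levels are assigned in htb_change_class() when a leaf gains its
- * first child; leaves stay at level 0 regardless of tree depth.
- */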
- static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
- #define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
- #if HTB_VER >> 16 != TC_HTB_PROTOVER
- #error "Mismatched sch_htb.c and pkt_sch.h"
- #endif
- /* Module parameter and sysfs export */
- module_param (htb_hysteresis, int, 0640);
- MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
- static int htb_rate_est = 0; /* whether HTB classes get a default rate estimator */
- module_param(htb_rate_est, int, 0640);
- MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
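- /* Usage sketch (assuming the module is built as sch_htb): module_param()
- * with mode 0640 exposes both knobs under sysfs, so hysteresis can be
- * toggled at runtime without reloading the module, e.g.:
- *
- *	# echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
- *	# cat /sys/module/sch_htb/parameters/htb_rate_est
- */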
- /* used internally to keep the status of a single class */
- enum htb_cmode {
- HTB_CANT_SEND, /* class can't send and can't borrow */
- HTB_MAY_BORROW, /* class can't send but may borrow */
- HTB_CAN_SEND /* class can send */
- };
- struct htb_prio {
- union {
- struct rb_root row;
- struct rb_root feed;
- };
- struct rb_node *ptr;
- /* When class changes from state 1->2 and disconnects from
- * parent's feed then we lose the ptr value and start from the
- * first child again. Here we store the classid of the
- * last valid ptr (used when ptr is NULL).
- */
- u32 last_ptr_id;
- };
- /* interior & leaf nodes; props specific to leaves are marked L:
- * To reduce false sharing, place mostly read fields at beginning,
- * and mostly written ones at the end.
- */
- struct htb_class {
- struct Qdisc_class_common common;
- struct psched_ratecfg rate;
- struct psched_ratecfg ceil;
- s64 buffer, cbuffer;/* token bucket depth/rate */
- s64 mbuffer; /* max wait time */
- u32 prio; /* these two are used only by leaves... */
- int quantum; /* but stored for parent-to-leaf return */
- struct tcf_proto __rcu *filter_list; /* class attached filters */
- struct tcf_block *block;
- int filter_cnt;
- int level; /* our level (see above) */
- unsigned int children;
- struct htb_class *parent; /* parent class */
- struct net_rate_estimator __rcu *rate_est;
- /*
- * Often-written fields
- */
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_basic_sync bstats_bias;
- struct tc_htb_xstats xstats; /* our special stats */
- /* token bucket parameters */
- s64 tokens, ctokens;/* current number of tokens */
- s64 t_c; /* checkpoint time */
- union {
- struct htb_class_leaf {
- int deficit[TC_HTB_MAXDEPTH];
- struct Qdisc *q;
- struct netdev_queue *offload_queue;
- } leaf;
- struct htb_class_inner {
- struct htb_prio clprio[TC_HTB_NUMPRIO];
- } inner;
- };
- s64 pq_key;
- int prio_activity; /* for which prios are we active */
- enum htb_cmode cmode; /* current mode of the class */
- struct rb_node pq_node; /* node for event queue */
- struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
- unsigned int drops ____cacheline_aligned_in_smp;
- unsigned int overlimits;
- };
- struct htb_level {
- struct rb_root wait_pq;
- struct htb_prio hprio[TC_HTB_NUMPRIO];
- };
- struct htb_sched {
- struct Qdisc_class_hash clhash;
- int defcls; /* class where unclassified flows go to */
- int rate2quantum; /* quant = rate / rate2quantum */
- /* filters for qdisc itself */
- struct tcf_proto __rcu *filter_list;
- struct tcf_block *block;
- #define HTB_WARN_TOOMANYEVENTS 0x1
- unsigned int warned; /* only one warning */
- int direct_qlen;
- struct work_struct work;
- /* non-shaped skbs; let them go directly through */
- struct qdisc_skb_head direct_queue;
- u32 direct_pkts;
- u32 overlimits;
- struct qdisc_watchdog watchdog;
- s64 now; /* cached dequeue time */
- /* time of nearest event per level (row) */
- s64 near_ev_cache[TC_HTB_MAXDEPTH];
- int row_mask[TC_HTB_MAXDEPTH];
- struct htb_level hlevel[TC_HTB_MAXDEPTH];
- struct Qdisc **direct_qdiscs;
- unsigned int num_direct_qdiscs;
- bool offload;
- };
- /* find class in global hash table using given handle */
- static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct Qdisc_class_common *clc;
- clc = qdisc_class_find(&q->clhash, handle);
- if (clc == NULL)
- return NULL;
- return container_of(clc, struct htb_class, common);
- }
- static unsigned long htb_search(struct Qdisc *sch, u32 handle)
- {
- return (unsigned long)htb_find(handle, sch);
- }
- /**
- * htb_classify - classify a packet into class
- * @skb: the socket buffer
- * @sch: the active queue discipline
- * @qerr: pointer for returned status code
- *
- * It returns NULL if the packet should be dropped or -1 if the packet
- * should be passed directly through. In all other cases a leaf class is
- * returned. We allow direct class selection by classid in skb->priority.
- * Then we examine filters in the qdisc and in inner nodes (if a higher
- * filter points to the inner node). If we end up with classid MAJOR:0 we
- * enqueue the skb into the special internal fifo (direct). These packets
- * then go directly through. If we still have no valid leaf we try to use
- * the MAJOR:default leaf. If that is still unsuccessful, we finish and
- * return the direct queue.
- */
- #define HTB_DIRECT ((struct htb_class *)-1L)
- static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
- int *qerr)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl;
- struct tcf_result res;
- struct tcf_proto *tcf;
- int result;
- /* allow selecting a class by setting skb->priority to a valid classid;
- * note that nfmark can be used too by attaching filter fw with no
- * rules in it
- */
- if (skb->priority == sch->handle)
- return HTB_DIRECT; /* X:0 (direct flow) selected */
- cl = htb_find(skb->priority, sch);
- if (cl) {
- if (cl->level == 0)
- return cl;
- /* Start with inner filter chain if a non-leaf class is selected */
- tcf = rcu_dereference_bh(cl->filter_list);
- } else {
- tcf = rcu_dereference_bh(q->filter_list);
- }
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
- #ifdef CONFIG_NET_CLS_ACT
- switch (result) {
- case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
- case TC_ACT_TRAP:
- *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- fallthrough;
- case TC_ACT_SHOT:
- return NULL;
- }
- #endif
- cl = (void *)res.class;
- if (!cl) {
- if (res.classid == sch->handle)
- return HTB_DIRECT; /* X:0 (direct flow) */
- cl = htb_find(res.classid, sch);
- if (!cl)
- break; /* filter selected invalid classid */
- }
- if (!cl->level)
- return cl; /* we hit leaf; return it */
- /* we have got inner class; apply inner filter chain */
- tcf = rcu_dereference_bh(cl->filter_list);
- }
- /* classification failed; try to use default class */
- cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
- if (!cl || cl->level)
- return HTB_DIRECT; /* bad default .. this is a safe bet */
- return cl;
- }
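- /* Illustrative userspace sketch of the classid-in-skb->priority shortcut
- * described above (hypothetical socket fd, HTB root handle 1:, leaf 1:10):
- *
- *	int prio = (1 << 16) | 10;	(encodes classid 1:10)
- *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
- *
- * Packets from that socket then skip the filter chain and are matched by
- * the htb_find(skb->priority, sch) lookup at the top of htb_classify().
- */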
- /**
- * htb_add_to_id_tree - adds class to the round robin list
- * @root: the root of the tree
- * @cl: the class to add
- * @prio: the given prio in the class
- *
- * Routine adds class to the list (actually tree) sorted by classid.
- * Make sure that class is not already on such list for given prio.
- */
- static void htb_add_to_id_tree(struct rb_root *root,
- struct htb_class *cl, int prio)
- {
- struct rb_node **p = &root->rb_node, *parent = NULL;
- while (*p) {
- struct htb_class *c;
- parent = *p;
- c = rb_entry(parent, struct htb_class, node[prio]);
- if (cl->common.classid > c->common.classid)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- rb_link_node(&cl->node[prio], parent, p);
- rb_insert_color(&cl->node[prio], root);
- }
- /**
- * htb_add_to_wait_tree - adds class to the event queue with delay
- * @q: the priority event queue
- * @cl: the class to add
- * @delay: delay in nanoseconds
- *
- * The class is added to the priority event queue to indicate that the class
- * will change its mode at time cl->pq_key (q->now + delay nanoseconds).
- * Make sure that the class is not already in the queue.
- */
- static void htb_add_to_wait_tree(struct htb_sched *q,
- struct htb_class *cl, s64 delay)
- {
- struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
- cl->pq_key = q->now + delay;
- if (cl->pq_key == q->now)
- cl->pq_key++;
- /* update the nearest event cache */
- if (q->near_ev_cache[cl->level] > cl->pq_key)
- q->near_ev_cache[cl->level] = cl->pq_key;
- while (*p) {
- struct htb_class *c;
- parent = *p;
- c = rb_entry(parent, struct htb_class, pq_node);
- if (cl->pq_key >= c->pq_key)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- rb_link_node(&cl->pq_node, parent, p);
- rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
- }
- /**
- * htb_next_rb_node - finds next node in binary tree
- * @n: the current node in binary tree
- *
- * When we are past the last key we return NULL.
- * Average complexity is 2 steps per call.
- */
- static inline void htb_next_rb_node(struct rb_node **n)
- {
- *n = rb_next(*n);
- }
- /**
- * htb_add_class_to_row - add class to its row
- * @q: the priority event queue
- * @cl: the class to add
- * @mask: the given priorities in class in bitmap
- *
- * The class is added to row at priorities marked in mask.
- * It does nothing if mask == 0.
- */
- static inline void htb_add_class_to_row(struct htb_sched *q,
- struct htb_class *cl, int mask)
- {
- q->row_mask[cl->level] |= mask;
- while (mask) {
- int prio = ffz(~mask);
- mask &= ~(1 << prio);
- htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
- }
- }
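- /* Worked example of the ffz(~mask) idiom above: with mask == 0x5 (prios 0
- * and 2 set), the loop runs twice:
- *
- *	ffz(~0x5) == 0 -> insert into hprio[0].row, mask becomes 0x4
- *	ffz(~0x4) == 2 -> insert into hprio[2].row, mask becomes 0x0
- *
- * i.e. ffz() of the complement yields the lowest set bit of mask.
- */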
- /* If this triggers, it is a bug in this code, but it need not be fatal */
- static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
- {
- if (RB_EMPTY_NODE(rb)) {
- WARN_ON(1);
- } else {
- rb_erase(rb, root);
- RB_CLEAR_NODE(rb);
- }
- }
- /**
- * htb_remove_class_from_row - removes class from its row
- * @q: the priority event queue
- * @cl: the class to remove
- * @mask: the given priorities in class in bitmap
- *
- * The class is removed from row at priorities marked in mask.
- * It does nothing if mask == 0.
- */
- static inline void htb_remove_class_from_row(struct htb_sched *q,
- struct htb_class *cl, int mask)
- {
- int m = 0;
- struct htb_level *hlevel = &q->hlevel[cl->level];
- while (mask) {
- int prio = ffz(~mask);
- struct htb_prio *hprio = &hlevel->hprio[prio];
- mask &= ~(1 << prio);
- if (hprio->ptr == cl->node + prio)
- htb_next_rb_node(&hprio->ptr);
- htb_safe_rb_erase(cl->node + prio, &hprio->row);
- if (!hprio->row.rb_node)
- m |= 1 << prio;
- }
- q->row_mask[cl->level] &= ~m;
- }
- /**
- * htb_activate_prios - creates an active class's feed chain
- * @q: the priority event queue
- * @cl: the class to activate
- *
- * The class is connected to ancestors and/or appropriate rows
- * for the priorities it participates in. cl->cmode must be the new
- * (activated) mode. It does nothing if cl->prio_activity == 0.
- */
- static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
- {
- struct htb_class *p = cl->parent;
- long m, mask = cl->prio_activity;
- while (cl->cmode == HTB_MAY_BORROW && p && mask) {
- m = mask;
- while (m) {
- unsigned int prio = ffz(~m);
- if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
- break;
- m &= ~(1 << prio);
- if (p->inner.clprio[prio].feed.rb_node)
- /* parent already has its feed in use, so
- * reset the bit in mask as the parent is already ok
- */
- mask &= ~(1 << prio);
- htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
- }
- p->prio_activity |= mask;
- cl = p;
- p = cl->parent;
- }
- if (cl->cmode == HTB_CAN_SEND && mask)
- htb_add_class_to_row(q, cl, mask);
- }
- /**
- * htb_deactivate_prios - remove class from feed chain
- * @q: the priority event queue
- * @cl: the class to deactivate
- *
- * cl->cmode must represent the old mode (before deactivation). It does
- * nothing if cl->prio_activity == 0. The class is removed from all feed
- * chains and rows.
- */
- static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
- {
- struct htb_class *p = cl->parent;
- long m, mask = cl->prio_activity;
- while (cl->cmode == HTB_MAY_BORROW && p && mask) {
- m = mask;
- mask = 0;
- while (m) {
- int prio = ffz(~m);
- m &= ~(1 << prio);
- if (p->inner.clprio[prio].ptr == cl->node + prio) {
- /* we are removing a child which is pointed to from
- * the parent feed - forget the pointer but remember
- * the classid
- */
- p->inner.clprio[prio].last_ptr_id = cl->common.classid;
- p->inner.clprio[prio].ptr = NULL;
- }
- htb_safe_rb_erase(cl->node + prio,
- &p->inner.clprio[prio].feed);
- if (!p->inner.clprio[prio].feed.rb_node)
- mask |= 1 << prio;
- }
- p->prio_activity &= ~mask;
- cl = p;
- p = cl->parent;
- }
- if (cl->cmode == HTB_CAN_SEND && mask)
- htb_remove_class_from_row(q, cl, mask);
- }
- static inline s64 htb_lowater(const struct htb_class *cl)
- {
- if (htb_hysteresis)
- return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
- else
- return 0;
- }
- static inline s64 htb_hiwater(const struct htb_class *cl)
- {
- if (htb_hysteresis)
- return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
- else
- return 0;
- }
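- /* Numeric sketch of the hysteresis dead band: assume cl->cbuffer is
- * 2000 (ns worth of tokens) and htb_hysteresis is enabled. A class not in
- * HTB_CANT_SEND only enters that mode once ctokens falls below -2000
- * (htb_lowater); once in it, the mode is left as soon as ctokens climbs
- * back to 0. The gap avoids rapid mode flapping around the zero crossing.
- */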
- /**
- * htb_class_mode - computes and returns current class mode
- * @cl: the target class
- * @diff: diff time in nanoseconds
- *
- * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
- * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
- * from now to the time when cl will change its state.
- * It is also worth noting that the class mode doesn't change simply
- * at cl->{c,}tokens == 0; rather, there can be hysteresis over the
- * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
- * mode transitions per time unit. The speed gain is about 1/6.
- */
- static inline enum htb_cmode
- htb_class_mode(struct htb_class *cl, s64 *diff)
- {
- s64 toks;
- if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
- *diff = -toks;
- return HTB_CANT_SEND;
- }
- if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
- return HTB_CAN_SEND;
- *diff = -toks;
- return HTB_MAY_BORROW;
- }
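- /* Worked example (htb_hysteresis off, so both watermarks are 0):
- *
- *	ctokens + *diff == -50                        -> HTB_CANT_SEND, *diff = 50
- *	ctokens + *diff == 100, tokens + *diff == 30  -> HTB_CAN_SEND
- *	ctokens + *diff == 100, tokens + *diff == -30 -> HTB_MAY_BORROW, *diff = 30
- *
- * Since tokens accumulate one per elapsed nanosecond, the *diff written
- * back is exactly the wait passed to htb_add_to_wait_tree().
- */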
- /**
- * htb_change_class_mode - changes a class's mode
- * @q: the priority event queue
- * @cl: the target class
- * @diff: diff time in microseconds
- *
- * This should be the only way to change a class's mode under normal
- * circumstances. The routine will update the feed list linkage, change
- * the mode and add the class to the wait event queue if appropriate.
- * The new mode should differ from the old one, and cl->pq_key has to be
- * valid if changing to a mode other than HTB_CAN_SEND
- * (see htb_add_to_wait_tree).
- */
- static void
- htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
- {
- enum htb_cmode new_mode = htb_class_mode(cl, diff);
- if (new_mode == cl->cmode)
- return;
- if (new_mode == HTB_CANT_SEND) {
- cl->overlimits++;
- q->overlimits++;
- }
- if (cl->prio_activity) { /* not necessary: speed optimization */
- if (cl->cmode != HTB_CANT_SEND)
- htb_deactivate_prios(q, cl);
- cl->cmode = new_mode;
- if (new_mode != HTB_CANT_SEND)
- htb_activate_prios(q, cl);
- } else
- cl->cmode = new_mode;
- }
- /**
- * htb_activate - inserts leaf cl into appropriate active feeds
- * @q: the priority event queue
- * @cl: the target class
- *
- * The routine learns the (new) priority of the leaf and activates the
- * feed chain for that prio. It can safely be called on an already
- * active leaf.
- */
- static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
- {
- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
- if (!cl->prio_activity) {
- cl->prio_activity = 1 << cl->prio;
- htb_activate_prios(q, cl);
- }
- }
- /**
- * htb_deactivate - remove leaf cl from active feeds
- * @q: the priority event queue
- * @cl: the target class
- *
- * Make sure that the leaf is active. In other words, it can't be called
- * with a non-active leaf.
- */
- static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
- {
- WARN_ON(!cl->prio_activity);
- htb_deactivate_prios(q, cl);
- cl->prio_activity = 0;
- }
- static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff **to_free)
- {
- int ret;
- unsigned int len = qdisc_pkt_len(skb);
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = htb_classify(skb, sch, &ret);
- if (cl == HTB_DIRECT) {
- /* enqueue to helper queue */
- if (q->direct_queue.qlen < q->direct_qlen) {
- __qdisc_enqueue_tail(skb, &q->direct_queue);
- q->direct_pkts++;
- } else {
- return qdisc_drop(skb, sch, to_free);
- }
- #ifdef CONFIG_NET_CLS_ACT
- } else if (!cl) {
- if (ret & __NET_XMIT_BYPASS)
- qdisc_qstats_drop(sch);
- __qdisc_drop(skb, to_free);
- return ret;
- #endif
- } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
- to_free)) != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(ret)) {
- qdisc_qstats_drop(sch);
- cl->drops++;
- }
- return ret;
- } else {
- htb_activate(q, cl);
- }
- sch->qstats.backlog += len;
- sch->q.qlen++;
- return NET_XMIT_SUCCESS;
- }
- static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
- {
- s64 toks = diff + cl->tokens;
- if (toks > cl->buffer)
- toks = cl->buffer;
- toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
- if (toks <= -cl->mbuffer)
- toks = 1 - cl->mbuffer;
- cl->tokens = toks;
- }
- static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
- {
- s64 toks = diff + cl->ctokens;
- if (toks > cl->cbuffer)
- toks = cl->cbuffer;
- toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
- if (toks <= -cl->mbuffer)
- toks = 1 - cl->mbuffer;
- cl->ctokens = toks;
- }
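- /* Worked example for the two helpers above: tokens are kept in
- * nanoseconds, and psched_l2t_ns() converts a packet length into the time
- * the configured rate needs to transmit it. At a rate of 1 Mbyte/s, a
- * 1000-byte packet costs 1,000,000 ns; with diff == 400,000 ns elapsed
- * since t_c and tokens == 0, the class lands at 400,000 - 1,000,000 =
- * -600,000 tokens, kept between buffer at the top and 1 - mbuffer at the
- * bottom by the clamps above.
- */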
- /**
- * htb_charge_class - charges amount "bytes" to leaf and ancestors
- * @q: the priority event queue
- * @cl: the class to start iterate
- * @level: the minimum level to account
- * @skb: the socket buffer
- *
- * The routine assumes that a packet "bytes" long was dequeued from leaf cl
- * borrowing from "level". It accounts bytes to the ceil leaky bucket for
- * the leaf and all ancestors and to the rate bucket for ancestors at levels
- * "level" and higher. It also handles a possible change of mode resulting
- * from the update. Note that the mode can also increase here (MAY_BORROW to
- * CAN_SEND) because we can use a more precise clock than the event queue
- * here. In such a case we remove the class from the event queue first.
- */
- static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
- int level, struct sk_buff *skb)
- {
- int bytes = qdisc_pkt_len(skb);
- enum htb_cmode old_mode;
- s64 diff;
- while (cl) {
- diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
- if (cl->level >= level) {
- if (cl->level == level)
- cl->xstats.lends++;
- htb_accnt_tokens(cl, bytes, diff);
- } else {
- cl->xstats.borrows++;
- cl->tokens += diff; /* we moved t_c; update tokens */
- }
- htb_accnt_ctokens(cl, bytes, diff);
- cl->t_c = q->now;
- old_mode = cl->cmode;
- diff = 0;
- htb_change_class_mode(q, cl, &diff);
- if (old_mode != cl->cmode) {
- if (old_mode != HTB_CAN_SEND)
- htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
- if (cl->cmode != HTB_CAN_SEND)
- htb_add_to_wait_tree(q, cl, diff);
- }
- /* update basic stats except for leaves which are already updated */
- if (cl->level)
- bstats_update(&cl->bstats, skb);
- cl = cl->parent;
- }
- }
- /**
- * htb_do_events - make mode changes to classes at the level
- * @q: the priority event queue
- * @level: which wait_pq in 'q->hlevel'
- * @start: start jiffies
- *
- * Scans event queue for pending events and applies them. Returns time of
- * next pending event (0 for no event in pq, q->now for too many events).
- * Note: Only events with cl->pq_key <= q->now are applied.
- */
- static s64 htb_do_events(struct htb_sched *q, const int level,
- unsigned long start)
- {
- /* don't run for longer than 2 jiffies; 2 is used instead of
- * 1 to simplify things when jiffy is going to be incremented
- * too soon
- */
- unsigned long stop_at = start + 2;
- struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
- while (time_before(jiffies, stop_at)) {
- struct htb_class *cl;
- s64 diff;
- struct rb_node *p = rb_first(wait_pq);
- if (!p)
- return 0;
- cl = rb_entry(p, struct htb_class, pq_node);
- if (cl->pq_key > q->now)
- return cl->pq_key;
- htb_safe_rb_erase(p, wait_pq);
- diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
- htb_change_class_mode(q, cl, &diff);
- if (cl->cmode != HTB_CAN_SEND)
- htb_add_to_wait_tree(q, cl, diff);
- }
- /* too much load - let's continue after a break for scheduling */
- if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
- pr_warn("htb: too many events!\n");
- q->warned |= HTB_WARN_TOOMANYEVENTS;
- }
- return q->now;
- }
- /* Returns class->node+prio from the id-tree where the class's id is >= id,
- * or NULL if no such class exists.
- */
- static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
- u32 id)
- {
- struct rb_node *r = NULL;
- while (n) {
- struct htb_class *cl =
- rb_entry(n, struct htb_class, node[prio]);
- if (id > cl->common.classid) {
- n = n->rb_right;
- } else if (id < cl->common.classid) {
- r = n;
- n = n->rb_left;
- } else {
- return n;
- }
- }
- return r;
- }
- /**
- * htb_lookup_leaf - returns next leaf class in DRR order
- * @hprio: the current one
- * @prio: which prio in class
- *
- * Find the leaf that the current feed pointer points to.
- */
- static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
- {
- int i;
- struct {
- struct rb_node *root;
- struct rb_node **pptr;
- u32 *pid;
- } stk[TC_HTB_MAXDEPTH], *sp = stk;
- BUG_ON(!hprio->row.rb_node);
- sp->root = hprio->row.rb_node;
- sp->pptr = &hprio->ptr;
- sp->pid = &hprio->last_ptr_id;
- for (i = 0; i < 65535; i++) {
- if (!*sp->pptr && *sp->pid) {
- /* ptr was invalidated but id is valid - try to recover
- * the original or next ptr
- */
- *sp->pptr =
- htb_id_find_next_upper(prio, sp->root, *sp->pid);
- }
- *sp->pid = 0; /* ptr is valid now, so remove this hint as it
- * can become out of date quickly
- */
- if (!*sp->pptr) { /* we are at right end; rewind & go up */
- *sp->pptr = sp->root;
- while ((*sp->pptr)->rb_left)
- *sp->pptr = (*sp->pptr)->rb_left;
- if (sp > stk) {
- sp--;
- if (!*sp->pptr) {
- WARN_ON(1);
- return NULL;
- }
- htb_next_rb_node(sp->pptr);
- }
- } else {
- struct htb_class *cl;
- struct htb_prio *clp;
- cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
- if (!cl->level)
- return cl;
- clp = &cl->inner.clprio[prio];
- (++sp)->root = clp->feed.rb_node;
- sp->pptr = &clp->ptr;
- sp->pid = &clp->last_ptr_id;
- }
- }
- WARN_ON(1);
- return NULL;
- }
- /* dequeues a packet at the given priority and level; call only if
- * you are sure that there is an active class at prio/level
- */
- static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
- const int level)
- {
- struct sk_buff *skb = NULL;
- struct htb_class *cl, *start;
- struct htb_level *hlevel = &q->hlevel[level];
- struct htb_prio *hprio = &hlevel->hprio[prio];
- /* look initial class up in the row */
- start = cl = htb_lookup_leaf(hprio, prio);
- do {
- next:
- if (unlikely(!cl))
- return NULL;
- /* class can be empty - it is unlikely but can be true if leaf
- * qdisc drops packets in enqueue routine or if someone used
- * graft operation on the leaf since last dequeue;
- * simply deactivate and skip such class
- */
- if (unlikely(cl->leaf.q->q.qlen == 0)) {
- struct htb_class *next;
- htb_deactivate(q, cl);
- /* row/level might become empty */
- if ((q->row_mask[level] & (1 << prio)) == 0)
- return NULL;
- next = htb_lookup_leaf(hprio, prio);
- if (cl == start) /* fix start if we just deleted it */
- start = next;
- cl = next;
- goto next;
- }
- skb = cl->leaf.q->dequeue(cl->leaf.q);
- if (likely(skb != NULL))
- break;
- qdisc_warn_nonwc("htb", cl->leaf.q);
- htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
- &q->hlevel[0].hprio[prio].ptr);
- cl = htb_lookup_leaf(hprio, prio);
- } while (cl != start);
- if (likely(skb != NULL)) {
- bstats_update(&cl->bstats, skb);
- cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
- if (cl->leaf.deficit[level] < 0) {
- cl->leaf.deficit[level] += cl->quantum;
- htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
- &q->hlevel[0].hprio[prio].ptr);
- }
- /* this used to be after charge_class but this constellation
- * gives us slightly better performance
- */
- if (!cl->leaf.q->q.qlen)
- htb_deactivate(q, cl);
- htb_charge_class(q, cl, level, skb);
- }
- return skb;
- }
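- /* Worked DRR example for the deficit handling above: with quantum == 1500,
- * a leaf whose deficit[level] is 200 may still send; a 1000-byte packet
- * drives the deficit to -800, so the quantum is added (deficit becomes 700)
- * and the round-robin pointer advances to the next class. A larger quantum
- * therefore lets a class send more bytes per round.
- */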
- static struct sk_buff *htb_dequeue(struct Qdisc *sch)
- {
- struct sk_buff *skb;
- struct htb_sched *q = qdisc_priv(sch);
- int level;
- s64 next_event;
- unsigned long start_at;
- /* try to dequeue direct packets as high prio (!) to minimize cpu work */
- skb = __qdisc_dequeue_head(&q->direct_queue);
- if (skb != NULL) {
- ok:
- qdisc_bstats_update(sch, skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
- return skb;
- }
- if (!sch->q.qlen)
- goto fin;
- q->now = ktime_get_ns();
- start_at = jiffies;
- next_event = q->now + 5LLU * NSEC_PER_SEC;
- for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
- /* common case optimization - skip event handler quickly */
- int m;
- s64 event = q->near_ev_cache[level];
- if (q->now >= event) {
- event = htb_do_events(q, level, start_at);
- if (!event)
- event = q->now + NSEC_PER_SEC;
- q->near_ev_cache[level] = event;
- }
- if (next_event > event)
- next_event = event;
- m = ~q->row_mask[level];
- while (m != (int)(-1)) {
- int prio = ffz(m);
- m |= 1 << prio;
- skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL))
- goto ok;
- }
- }
- if (likely(next_event > q->now))
- qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
- else
- schedule_work(&q->work);
- fin:
- return skb;
- }
- /* reset all classes */
- /* always called under BH & queue lock */
- static void htb_reset(struct Qdisc *sch)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl;
- unsigned int i;
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- if (cl->level)
- memset(&cl->inner, 0, sizeof(cl->inner));
- else {
- if (cl->leaf.q && !q->offload)
- qdisc_reset(cl->leaf.q);
- }
- cl->prio_activity = 0;
- cl->cmode = HTB_CAN_SEND;
- }
- }
- qdisc_watchdog_cancel(&q->watchdog);
- __qdisc_reset_queue(&q->direct_queue);
- memset(q->hlevel, 0, sizeof(q->hlevel));
- memset(q->row_mask, 0, sizeof(q->row_mask));
- }
- static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
- [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
- [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
- [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
- [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
- [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
- [TCA_HTB_RATE64] = { .type = NLA_U64 },
- [TCA_HTB_CEIL64] = { .type = NLA_U64 },
- [TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
- };
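- /* These attributes carry what iproute2 sends for commands such as the
- * following sketch (hypothetical device name):
- *
- *	tc qdisc add dev eth0 root handle 1: htb default 20 direct_qlen 1000
- *	tc class add dev eth0 parent 1: classid 1:20 htb rate 10mbit ceil 20mbit
- *
- * TCA_HTB_INIT carries the qdisc-level tc_htb_glob, TCA_HTB_PARMS the
- * per-class tc_htb_opt, and TCA_HTB_RATE64/TCA_HTB_CEIL64 extend rates
- * beyond 32 bits.
- */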
- static void htb_work_func(struct work_struct *work)
- {
- struct htb_sched *q = container_of(work, struct htb_sched, work);
- struct Qdisc *sch = q->watchdog.qdisc;
- rcu_read_lock();
- __netif_schedule(qdisc_root(sch));
- rcu_read_unlock();
- }
- static void htb_set_lockdep_class_child(struct Qdisc *q)
- {
- static struct lock_class_key child_key;
- lockdep_set_class(qdisc_lock(q), &child_key);
- }
- static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
- {
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
- }
- static int htb_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
- {
- struct net_device *dev = qdisc_dev(sch);
- struct tc_htb_qopt_offload offload_opt;
- struct htb_sched *q = qdisc_priv(sch);
- struct nlattr *tb[TCA_HTB_MAX + 1];
- struct tc_htb_glob *gopt;
- unsigned int ntx;
- bool offload;
- int err;
- qdisc_watchdog_init(&q->watchdog, sch);
- INIT_WORK(&q->work, htb_work_func);
- if (!opt)
- return -EINVAL;
- err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
- if (err)
- return err;
- err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
- NULL);
- if (err < 0)
- return err;
- if (!tb[TCA_HTB_INIT])
- return -EINVAL;
- gopt = nla_data(tb[TCA_HTB_INIT]);
- if (gopt->version != HTB_VER >> 16)
- return -EINVAL;
- offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
- if (offload) {
- if (sch->parent != TC_H_ROOT) {
- NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
- return -EOPNOTSUPP;
- }
- if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
- NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
- return -EOPNOTSUPP;
- }
- q->num_direct_qdiscs = dev->real_num_tx_queues;
- q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
- sizeof(*q->direct_qdiscs),
- GFP_KERNEL);
- if (!q->direct_qdiscs)
- return -ENOMEM;
- }
- err = qdisc_class_hash_init(&q->clhash);
- if (err < 0)
- return err;
- if (tb[TCA_HTB_DIRECT_QLEN])
- q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
- else
- q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
- if ((q->rate2quantum = gopt->rate2quantum) < 1)
- q->rate2quantum = 1;
- q->defcls = gopt->defcls;
- if (!offload)
- return 0;
- for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
- struct Qdisc *qdisc;
- qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
- TC_H_MAKE(sch->handle, 0), extack);
- if (!qdisc) {
- return -ENOMEM;
- }
- htb_set_lockdep_class_child(qdisc);
- q->direct_qdiscs[ntx] = qdisc;
- qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
- }
- sch->flags |= TCQ_F_MQROOT;
- offload_opt = (struct tc_htb_qopt_offload) {
- .command = TC_HTB_CREATE,
- .parent_classid = TC_H_MAJ(sch->handle) >> 16,
- .classid = TC_H_MIN(q->defcls),
- .extack = extack,
- };
- err = htb_offload(dev, &offload_opt);
- if (err)
- return err;
- /* Defer this assignment, so that htb_destroy skips offload-related
- * parts (especially calling ndo_setup_tc) on errors.
- */
- q->offload = true;
- return 0;
- }
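- /* Sketch of enabling the offload path set up above (assumes a NIC whose
- * driver implements TC_SETUP_QDISC_HTB via ndo_setup_tc):
- *
- *	ethtool -K eth0 hw-tc-offload on
- *	tc qdisc replace dev eth0 root handle 1: htb offload
- *
- * With TCA_HTB_OFFLOAD present, one pfifo per TX queue is created here
- * and grafted in htb_attach_offload() below.
- */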
- static void htb_attach_offload(struct Qdisc *sch)
- {
- struct net_device *dev = qdisc_dev(sch);
- struct htb_sched *q = qdisc_priv(sch);
- unsigned int ntx;
- for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
- struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
- old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
- qdisc_put(old);
- qdisc_hash_add(qdisc, false);
- }
- for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
- struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
- qdisc_put(old);
- }
- kfree(q->direct_qdiscs);
- q->direct_qdiscs = NULL;
- }
- static void htb_attach_software(struct Qdisc *sch)
- {
- struct net_device *dev = qdisc_dev(sch);
- unsigned int ntx;
- /* Resemble qdisc_graft behavior. */
- for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
- struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
- qdisc_refcount_inc(sch);
- qdisc_put(old);
- }
- }
- static void htb_attach(struct Qdisc *sch)
- {
- struct htb_sched *q = qdisc_priv(sch);
- if (q->offload)
- htb_attach_offload(sch);
- else
- htb_attach_software(sch);
- }
- static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct nlattr *nest;
- struct tc_htb_glob gopt;
- if (q->offload)
- sch->flags |= TCQ_F_OFFLOADED;
- else
- sch->flags &= ~TCQ_F_OFFLOADED;
- sch->qstats.overlimits = q->overlimits;
- /* It's safe not to acquire the qdisc lock. As we hold RTNL,
- * no change can happen on the qdisc parameters.
- */
- gopt.direct_pkts = q->direct_pkts;
- gopt.version = HTB_VER;
- gopt.rate2quantum = q->rate2quantum;
- gopt.defcls = q->defcls;
- gopt.debug = 0;
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
- nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
- goto nla_put_failure;
- if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
- nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
- }
- static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
- struct sk_buff *skb, struct tcmsg *tcm)
- {
- struct htb_class *cl = (struct htb_class *)arg;
- struct htb_sched *q = qdisc_priv(sch);
- struct nlattr *nest;
- struct tc_htb_opt opt;
- /* It's safe not to acquire the qdisc lock. As we hold RTNL,
- * no change can happen on the class parameters.
- */
- tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
- tcm->tcm_handle = cl->common.classid;
- if (!cl->level && cl->leaf.q)
- tcm->tcm_info = cl->leaf.q->handle;
- nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- memset(&opt, 0, sizeof(opt));
- psched_ratecfg_getrate(&opt.rate, &cl->rate);
- opt.buffer = PSCHED_NS2TICKS(cl->buffer);
- psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
- opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
- opt.quantum = cl->quantum;
- opt.prio = cl->prio;
- opt.level = cl->level;
- if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
- goto nla_put_failure;
- if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
- goto nla_put_failure;
- if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
- nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
- TCA_HTB_PAD))
- goto nla_put_failure;
- if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
- nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
- TCA_HTB_PAD))
- goto nla_put_failure;
- return nla_nest_end(skb, nest);
- nla_put_failure:
- nla_nest_cancel(skb, nest);
- return -1;
- }
- static void htb_offload_aggregate_stats(struct htb_sched *q,
- struct htb_class *cl)
- {
- u64 bytes = 0, packets = 0;
- struct htb_class *c;
- unsigned int i;
- gnet_stats_basic_sync_init(&cl->bstats);
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
- struct htb_class *p = c;
- while (p && p->level < cl->level)
- p = p->parent;
- if (p != cl)
- continue;
- bytes += u64_stats_read(&c->bstats_bias.bytes);
- packets += u64_stats_read(&c->bstats_bias.packets);
- if (c->level == 0) {
- bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
- packets += u64_stats_read(&c->leaf.q->bstats.packets);
- }
- }
- }
- _bstats_update(&cl->bstats, bytes, packets);
- }
- static int
- htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
- {
- struct htb_class *cl = (struct htb_class *)arg;
- struct htb_sched *q = qdisc_priv(sch);
- struct gnet_stats_queue qs = {
- .drops = cl->drops,
- .overlimits = cl->overlimits,
- };
- __u32 qlen = 0;
- if (!cl->level && cl->leaf.q)
- qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
- cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
- INT_MIN, INT_MAX);
- cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
- INT_MIN, INT_MAX);
- if (q->offload) {
- if (!cl->level) {
- if (cl->leaf.q)
- cl->bstats = cl->leaf.q->bstats;
- else
- gnet_stats_basic_sync_init(&cl->bstats);
- _bstats_update(&cl->bstats,
- u64_stats_read(&cl->bstats_bias.bytes),
- u64_stats_read(&cl->bstats_bias.packets));
- } else {
- htb_offload_aggregate_stats(q, cl);
- }
- }
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
- gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
- return -1;
- return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
- }
- static struct netdev_queue *
- htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
- {
- struct net_device *dev = qdisc_dev(sch);
- struct tc_htb_qopt_offload offload_opt;
- struct htb_sched *q = qdisc_priv(sch);
- int err;
- if (!q->offload)
- return sch->dev_queue;
- offload_opt = (struct tc_htb_qopt_offload) {
- .command = TC_HTB_LEAF_QUERY_QUEUE,
- .classid = TC_H_MIN(tcm->tcm_parent),
- };
- err = htb_offload(dev, &offload_opt);
- if (err || offload_opt.qid >= dev->num_tx_queues)
- return NULL;
- return netdev_get_tx_queue(dev, offload_opt.qid);
- }
- static struct Qdisc *
- htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
- {
- struct net_device *dev = dev_queue->dev;
- struct Qdisc *old_q;
- if (dev->flags & IFF_UP)
- dev_deactivate(dev);
- old_q = dev_graft_qdisc(dev_queue, new_q);
- if (new_q)
- new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
- if (dev->flags & IFF_UP)
- dev_activate(dev);
- return old_q;
- }
- static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
- {
- struct netdev_queue *queue;
- queue = cl->leaf.offload_queue;
- if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
- WARN_ON(cl->leaf.q->dev_queue != queue);
- return queue;
- }
- static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
- struct htb_class *cl_new, bool destroying)
- {
- struct netdev_queue *queue_old, *queue_new;
- struct net_device *dev = qdisc_dev(sch);
- queue_old = htb_offload_get_queue(cl_old);
- queue_new = htb_offload_get_queue(cl_new);
- if (!destroying) {
- struct Qdisc *qdisc;
- if (dev->flags & IFF_UP)
- dev_deactivate(dev);
- qdisc = dev_graft_qdisc(queue_old, NULL);
- WARN_ON(qdisc != cl_old->leaf.q);
- }
- if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
- cl_old->leaf.q->dev_queue = queue_new;
- cl_old->leaf.offload_queue = queue_new;
- if (!destroying) {
- struct Qdisc *qdisc;
- qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
- if (dev->flags & IFF_UP)
- dev_activate(dev);
- WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
- }
- }
- static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old, struct netlink_ext_ack *extack)
- {
- struct netdev_queue *dev_queue = sch->dev_queue;
- struct htb_class *cl = (struct htb_class *)arg;
- struct htb_sched *q = qdisc_priv(sch);
- struct Qdisc *old_q;
- if (cl->level)
- return -EINVAL;
- if (q->offload)
- dev_queue = htb_offload_get_queue(cl);
- if (!new) {
- new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
- cl->common.classid, extack);
- if (!new)
- return -ENOBUFS;
- }
- if (q->offload) {
- htb_set_lockdep_class_child(new);
- /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
- qdisc_refcount_inc(new);
- old_q = htb_graft_helper(dev_queue, new);
- }
- *old = qdisc_replace(sch, new, &cl->leaf.q);
- if (q->offload) {
- WARN_ON(old_q != *old);
- qdisc_put(old_q);
- }
- return 0;
- }
- static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
- {
- struct htb_class *cl = (struct htb_class *)arg;
- return !cl->level ? cl->leaf.q : NULL;
- }
- static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
- {
- struct htb_class *cl = (struct htb_class *)arg;
- htb_deactivate(qdisc_priv(sch), cl);
- }
- static inline int htb_parent_last_child(struct htb_class *cl)
- {
- if (!cl->parent)
- /* the root class */
- return 0;
- if (cl->parent->children > 1)
- /* not the last child */
- return 0;
- return 1;
- }
- static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
- struct Qdisc *new_q)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *parent = cl->parent;
- WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
- if (parent->cmode != HTB_CAN_SEND)
- htb_safe_rb_erase(&parent->pq_node,
- &q->hlevel[parent->level].wait_pq);
- parent->level = 0;
- memset(&parent->inner, 0, sizeof(parent->inner));
- parent->leaf.q = new_q ? new_q : &noop_qdisc;
- parent->tokens = parent->buffer;
- parent->ctokens = parent->cbuffer;
- parent->t_c = ktime_get_ns();
- parent->cmode = HTB_CAN_SEND;
- if (q->offload)
- parent->leaf.offload_queue = cl->leaf.offload_queue;
- }
- static void htb_parent_to_leaf_offload(struct Qdisc *sch,
- struct netdev_queue *dev_queue,
- struct Qdisc *new_q)
- {
- struct Qdisc *old_q;
- /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
- if (new_q)
- qdisc_refcount_inc(new_q);
- old_q = htb_graft_helper(dev_queue, new_q);
- WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
- }
- static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
- bool last_child, bool destroying,
- struct netlink_ext_ack *extack)
- {
- struct tc_htb_qopt_offload offload_opt;
- struct netdev_queue *dev_queue;
- struct Qdisc *q = cl->leaf.q;
- struct Qdisc *old;
- int err;
- if (cl->level)
- return -EINVAL;
- WARN_ON(!q);
- dev_queue = htb_offload_get_queue(cl);
- /* When destroying, caller qdisc_graft grafts the new qdisc and invokes
- * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
- * does not need to graft or qdisc_put the qdisc being destroyed.
- */
- if (!destroying) {
- old = htb_graft_helper(dev_queue, NULL);
- /* Last qdisc grafted should be the same as cl->leaf.q when
- * calling htb_delete.
- */
- WARN_ON(old != q);
- }
- if (cl->parent) {
- _bstats_update(&cl->parent->bstats_bias,
- u64_stats_read(&q->bstats.bytes),
- u64_stats_read(&q->bstats.packets));
- }
- offload_opt = (struct tc_htb_qopt_offload) {
- .command = !last_child ? TC_HTB_LEAF_DEL :
- destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
- TC_HTB_LEAF_DEL_LAST,
- .classid = cl->common.classid,
- .extack = extack,
- };
- err = htb_offload(qdisc_dev(sch), &offload_opt);
- if (!destroying) {
- if (!err)
- qdisc_put(old);
- else
- htb_graft_helper(dev_queue, old);
- }
- if (last_child)
- return err;
- if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
- u32 classid = TC_H_MAJ(sch->handle) |
- TC_H_MIN(offload_opt.classid);
- struct htb_class *moved_cl = htb_find(classid, sch);
- htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
- }
- return err;
- }
- static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
- {
- if (!cl->level) {
- WARN_ON(!cl->leaf.q);
- qdisc_put(cl->leaf.q);
- }
- gen_kill_estimator(&cl->rate_est);
- tcf_block_put(cl->block);
- kfree(cl);
- }
- static void htb_destroy(struct Qdisc *sch)
- {
- struct net_device *dev = qdisc_dev(sch);
- struct tc_htb_qopt_offload offload_opt;
- struct htb_sched *q = qdisc_priv(sch);
- struct hlist_node *next;
- bool nonempty, changed;
- struct htb_class *cl;
- unsigned int i;
- cancel_work_sync(&q->work);
- qdisc_watchdog_cancel(&q->watchdog);
- /* This line used to be after the htb_destroy_class call below
- * and surprisingly it worked in 2.4. But it must precede it
- * because filters need their target class alive to be able to call
- * unbind_filter on it (without an Oops).
- */
- tcf_block_put(q->block);
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
- tcf_block_put(cl->block);
- cl->block = NULL;
- }
- }
- do {
- nonempty = false;
- changed = false;
- for (i = 0; i < q->clhash.hashsize; i++) {
- hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
- common.hnode) {
- bool last_child;
- if (!q->offload) {
- htb_destroy_class(sch, cl);
- continue;
- }
- nonempty = true;
- if (cl->level)
- continue;
- changed = true;
- last_child = htb_parent_last_child(cl);
- htb_destroy_class_offload(sch, cl, last_child,
- true, NULL);
- qdisc_class_hash_remove(&q->clhash,
- &cl->common);
- if (cl->parent)
- cl->parent->children--;
- if (last_child)
- htb_parent_to_leaf(sch, cl, NULL);
- htb_destroy_class(sch, cl);
- }
- }
- } while (changed);
- WARN_ON(nonempty);
- qdisc_class_hash_destroy(&q->clhash);
- __qdisc_reset_queue(&q->direct_queue);
- if (q->offload) {
- offload_opt = (struct tc_htb_qopt_offload) {
- .command = TC_HTB_DESTROY,
- };
- htb_offload(dev, &offload_opt);
- }
- if (!q->direct_qdiscs)
- return;
- for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
- qdisc_put(q->direct_qdiscs[i]);
- kfree(q->direct_qdiscs);
- }
- static int htb_delete(struct Qdisc *sch, unsigned long arg,
- struct netlink_ext_ack *extack)
- {
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = (struct htb_class *)arg;
- struct Qdisc *new_q = NULL;
- int last_child = 0;
- int err;
- /* TODO: why don't we allow deleting a subtree? References? Does the
- * tc subsys guarantee that in htb_destroy it holds no class
- * refs, so that we can remove children safely there?
- */
- if (cl->children || cl->filter_cnt)
- return -EBUSY;
- if (!cl->level && htb_parent_last_child(cl))
- last_child = 1;
- if (q->offload) {
- err = htb_destroy_class_offload(sch, cl, last_child, false,
- extack);
- if (err)
- return err;
- }
- if (last_child) {
- struct netdev_queue *dev_queue = sch->dev_queue;
- if (q->offload)
- dev_queue = htb_offload_get_queue(cl);
- new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
- cl->parent->common.classid,
- NULL);
- if (q->offload) {
- if (new_q)
- htb_set_lockdep_class_child(new_q);
- htb_parent_to_leaf_offload(sch, dev_queue, new_q);
- }
- }
- sch_tree_lock(sch);
- if (!cl->level)
- qdisc_purge_queue(cl->leaf.q);
- /* delete from hash and active; remainder in destroy_class */
- qdisc_class_hash_remove(&q->clhash, &cl->common);
- if (cl->parent)
- cl->parent->children--;
- if (cl->prio_activity)
- htb_deactivate(q, cl);
- if (cl->cmode != HTB_CAN_SEND)
- htb_safe_rb_erase(&cl->pq_node,
- &q->hlevel[cl->level].wait_pq);
- if (last_child)
- htb_parent_to_leaf(sch, cl, new_q);
- sch_tree_unlock(sch);
- htb_destroy_class(sch, cl);
- return 0;
- }
- static int htb_change_class(struct Qdisc *sch, u32 classid,
- u32 parentid, struct nlattr **tca,
- unsigned long *arg, struct netlink_ext_ack *extack)
- {
- int err = -EINVAL;
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = (struct htb_class *)*arg, *parent;
- struct tc_htb_qopt_offload offload_opt;
- struct nlattr *opt = tca[TCA_OPTIONS];
- struct nlattr *tb[TCA_HTB_MAX + 1];
- struct Qdisc *parent_qdisc = NULL;
- struct netdev_queue *dev_queue;
- struct tc_htb_opt *hopt;
- u64 rate64, ceil64;
- int warn = 0;
- /* extract all subattrs from opt attr */
- if (!opt)
- goto failure;
- err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
- NULL);
- if (err < 0)
- goto failure;
- err = -EINVAL;
- if (tb[TCA_HTB_PARMS] == NULL)
- goto failure;
- parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
- hopt = nla_data(tb[TCA_HTB_PARMS]);
- if (!hopt->rate.rate || !hopt->ceil.rate)
- goto failure;
- if (q->offload) {
- /* Options not supported by the offload. */
- if (hopt->rate.overhead || hopt->ceil.overhead) {
- NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
- goto failure;
- }
- if (hopt->rate.mpu || hopt->ceil.mpu) {
- NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
- goto failure;
- }
- if (hopt->quantum) {
- NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter");
- goto failure;
- }
- if (hopt->prio) {
- NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter");
- goto failure;
- }
- }
- /* Keeping backward compatibility with rate_table-based iproute2 tc */
- if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
- qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
- NULL));
- if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
- qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
- NULL));
	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	if (!cl) { /* new class */
		struct net_device *dev = qdisc_dev(sch);
		struct Qdisc *new_q, *old_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		gnet_stats_basic_sync_init(&cl->bstats);
		gnet_stats_basic_sync_init(&cl->bstats_bias);

		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						true,
						tca[TCA_RATE] ? : &est.nla);
			if (err)
				goto err_block_put;
		}

		cl->children = 0;
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		cl->common.classid = classid;
		/* Make sure nothing interrupts us between two
		 * ndo_setup_tc calls.
		 */
		ASSERT_RTNL();

		/* Create the leaf qdisc early because it uses
		 * kmalloc(GFP_KERNEL), which can't be used inside
		 * sch_tree_lock -- thanks to Karlis Peisenieks.
		 */
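		/* Pick the dev_queue backing the new leaf. Three cases:
		 *  - no offload: all classes share the qdisc's own dev_queue;
		 *  - offload with no parent, or with an inner-node parent:
		 *    ask the driver to allocate a queue
		 *    (TC_HTB_LEAF_ALLOC_QUEUE);
		 *  - offload with a parent that is still a leaf: reuse the
		 *    parent's queue and convert the parent
		 *    (TC_HTB_LEAF_TO_INNER).
		 */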
		if (!q->offload) {
			dev_queue = sch->dev_queue;
		} else if (!(parent && !parent->level)) {
			/* Assign a dev_queue to this classid. */
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_ALLOC_QUEUE,
				.classid = cl->common.classid,
				.parent_classid = parent ?
					TC_H_MIN(parent->common.classid) :
					TC_HTB_CLASSID_ROOT,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
				       err);
				goto err_kill_estimator;
			}
			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
		} else { /* First child. */
			dev_queue = htb_offload_get_queue(parent);
			old_q = htb_graft_helper(dev_queue, NULL);
			WARN_ON(old_q != parent->leaf.q);
			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_LEAF_TO_INNER,
				.classid = cl->common.classid,
				.parent_classid =
					TC_H_MIN(parent->common.classid),
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err) {
				pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
				       err);
				htb_graft_helper(dev_queue, old_q);
				goto err_kill_estimator;
			}
			_bstats_update(&parent->bstats_bias,
				       u64_stats_read(&old_q->bstats.bytes),
				       u64_stats_read(&old_q->bstats.packets));
			qdisc_put(old_q);
		}
		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  classid, NULL);
		if (q->offload) {
			if (new_q) {
				htb_set_lockdep_class_child(new_q);
				/* One ref for cl->leaf.q, the other for
				 * dev_queue->qdisc.
				 */
				qdisc_refcount_inc(new_q);
			}
			old_q = htb_graft_helper(dev_queue, new_q);
			/* No qdisc_put needed. */
			WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
		}
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			qdisc_purge_queue(parent->leaf.q);
			parent_qdisc = parent->leaf.q;
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->inner, 0, sizeof(parent->inner));
		}

		/* leaf (we) needs elementary qdisc */
		cl->leaf.q = new_q ? new_q : &noop_qdisc;
		if (q->offload)
			cl->leaf.offload_queue = dev_queue;

		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		if (q->offload) {
			struct net_device *dev = qdisc_dev(sch);

			offload_opt = (struct tc_htb_qopt_offload) {
				.command = TC_HTB_NODE_MODIFY,
				.classid = cl->common.classid,
				.rate = max_t(u64, hopt->rate.rate, rate64),
				.ceil = max_t(u64, hopt->ceil.rate, ceil64),
				.extack = extack,
			};
			err = htb_offload(dev, &offload_opt);
			if (err)
				/* Estimator was replaced, and rollback may
				 * fail as well, so we don't try to recover
				 * it, and the estimator won't work properly
				 * with the offload anyway, because bstats are
				 * updated only when the stats are queried.
				 */
				return err;
		}
		sch_tree_lock(sch);
	}

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
	/* A nasty bug used to live here: we must check that the node is
	 * really a leaf before changing cl->leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			warn = -1;
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			warn = 1;
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);
	qdisc_put(parent_qdisc);

	if (warn)
		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
			cl->common.classid, (warn == -1 ? "small" : "big"));

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

err_kill_estimator:
	gen_kill_estimator(&cl->rate_est);
err_block_put:
	tcf_block_put(cl->block);
	kfree(cl);
failure:
	return err;
}
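
/* Illustrative only (the device name is hypothetical): the function
 * above is reached by commands such as
 *
 *   tc class add dev eth0 parent 1: classid 1:10 htb rate 100mbit ceil 200mbit
 *
 * where rate/ceil arrive as TCA_HTB_PARMS, supplemented by
 * TCA_HTB_RATE64/TCA_HTB_CEIL64 when a rate doesn't fit in 32 bits.
 */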
static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}
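
/* Bound filters pin a class via filter_cnt: the delete path refuses to
 * remove a class whose filter_cnt is nonzero, so bind/unbind act as a
 * simple lock count rather than a full refcount.
 */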
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be here to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, it is OK - bind_filter is just
	 * another way to "lock" the class - unlike "get", this lock can
	 * be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}
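
/* Enumerate every class for dumps such as "tc class show"; the walker
 * may terminate the iteration early by setting arg->stop.
 */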
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}
static const struct Qdisc_class_ops htb_class_ops = {
	.select_queue	= htb_select_queue,
	.graft		= htb_graft,
	.leaf		= htb_leaf,
	.qlen_notify	= htb_qlen_notify,
	.find		= htb_search,
	.change		= htb_change_class,
	.delete		= htb_delete,
	.walk		= htb_walk,
	.tcf_block	= htb_tcf_block,
	.bind_tcf	= htb_bind_filter,
	.unbind_tcf	= htb_unbind_filter,
	.dump		= htb_dump_class,
	.dump_stats	= htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		= &htb_class_ops,
	.id		= "htb",
	.priv_size	= sizeof(struct htb_sched),
	.enqueue	= htb_enqueue,
	.dequeue	= htb_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= htb_init,
	.attach		= htb_attach,
	.reset		= htb_reset,
	.destroy	= htb_destroy,
	.dump		= htb_dump,
	.owner		= THIS_MODULE,
};
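
/* Module plumbing: register_qdisc() links htb_qdisc_ops into the qdisc
 * core's list of disciplines, making "htb" available to tc.
 */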
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");