sch_generic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff *head;
	struct sk_buff *tail;
	__u32 qlen;
	spinlock_t lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;

	ANDROID_KABI_RESERVE(1);

	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return true;
	return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
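
/* Illustrative, hedged usage sketch: an RCU reader that needs to keep a qdisc
 * alive beyond the read-side critical section could pair
 * qdisc_refcount_inc_nz() with qdisc_put(). The txq variable below is
 * hypothetical; only the pattern is the point:
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
 *	rcu_read_unlock();
 *	if (q) {
 *		...use q...
 *		qdisc_put(q);
 *	}
 *
 * The not-zero check is what makes the grab safe against a concurrent release.
 */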

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantic. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
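
/* Illustrative, hedged sketch of how the begin/end pair is typically used by
 * a transmit/dispatch loop (the real loop lives outside this header):
 *
 *	if (qdisc_run_begin(q)) {
 *		...dequeue and transmit packets from q...
 *		qdisc_run_end(q);
 *	}
 *
 * Only the CPU that wins qdisc_run_begin() runs the qdisc; a loser either
 * relies on the owner observing the MISSED bit in qdisc_run_end(), or the
 * qdisc gets rescheduled via __netif_schedule().
 */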

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);

	ANDROID_KABI_RESERVE(1);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;

	ANDROID_KABI_RESERVE(1);
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void *			(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
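
/* Illustrative, hedged sketch: these helpers are meant for code that already
 * holds the corresponding lock, e.g. walking a chain's filter list under
 * filter_chain_lock (variable names here are hypothetical):
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	for (tp = tcf_chain_dereference(chain->filter_chain, chain); tp;
 *	     tp = tcf_chain_dereference(tp->next, chain))
 *		...inspect tp...
 *	mutex_unlock(&chain->filter_chain_lock);
 *
 * With lockdep enabled, a dereference done without the lock is flagged.
 */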

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}
	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);

#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif

void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) !=
		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
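
/* Illustrative, hedged sketch: a classful qdisc's own ->enqueue typically
 * classifies the skb, then hands it to the chosen child via qdisc_enqueue()
 * so pkt_len is recomputed against the child's size table. The child/ret
 * names below are hypothetical:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret == NET_XMIT_SUCCESS) {
 *		sch->qstats.backlog += qdisc_pkt_len(skb);
 *		sch->q.qlen++;
 *	} else if (net_xmit_drop_count(ret)) {
 *		qdisc_qstats_drop(sch);
 *	}
 *	return ret;
 */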

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
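
/* Illustrative, hedged sketch of the pseudo-peek pattern a non-work-conserving
 * qdisc (a shaper, for instance) tends to follow in its ->dequeue
 * (time_next_packet is a hypothetical shaper deadline):
 *
 *	skb = qdisc_peek_dequeued(sch);		// look at the head, keep it queued
 *	if (!skb || now < time_next_packet)
 *		return NULL;			// not allowed to send yet
 *	return qdisc_dequeue_peeked(sch);	// actually consume it
 *
 * The peeked skb is parked on sch->gso_skb, so queue length and backlog stay
 * consistent between the peek and the real dequeue.
 */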

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
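
/* Hedged worked example of the L2T lookup above, with made-up parameters:
 * with cell_log = 3, cell_align = 0 and overhead = 0, a 1000-byte packet
 * selects slot 1000 >> 3 = 125, so its transmission time is rtab->data[125]
 * (the table itself is filled in by userspace tc when the rate table is
 * configured). Packets that map beyond slot 255 are handled by the
 * rtab->data[255] * (slot >> 8) folding term.
 */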

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
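
/* Hedged note on the fixed-point math above: psched_ratecfg_precompute()
 * chooses mult and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps. For example, at 125,000,000 bytes/s
 * (1 Gbit/s) a 1500-byte frame costs roughly 1500 * 1e9 / 125e6 = 12,000 ns.
 * The ATM branch rounds the length up to whole 48-byte payload cells and
 * charges 53 bytes per cell to account for the cell header overhead.
 */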

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif