sch_fq_pie.c

// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <[email protected]>
 * Copyright (C) 2019 Sachin D. Patil <[email protected]>
 * Copyright (C) 2019 V. Saicharan <[email protected]>
 * Copyright (C) 2019 Mohit Bhasi <[email protected]>
 * Copyright (C) 2019 Leslie Monis <[email protected]>
 * Copyright (C) 2019 Gautam Ramakrishnan <[email protected]>
 */

#include <linux/jhash.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 *   - Packets are classified on flows.
 *   - This is a Stochastic model (as we use a hash, several flows might
 *     be hashed to the same slot)
 *   - Each flow has a PIE managed queue.
 *   - Flows are linked onto two (Round Robin) lists,
 *     so that new flows have priority on old ones.
 *   - For a given flow, packets are not reordered.
 *   - Drops during enqueue only.
 *   - ECN capability is off by default.
 *   - ECN threshold (if ECN is enabled) is at 10% by default.
 *   - Uses timestamps to calculate queue delay by default.
 */
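
/* Illustrative configuration using iproute2's tc-fq_pie syntax (example
 * values only; option names mirror the TCA_FQ_PIE_* attributes below):
 *
 *   tc qdisc add dev eth0 root fq_pie flows 1024 limit 10240 target 15ms ecn
 */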

/**
 * struct fq_pie_flow - contains data for each flow
 * @vars: pie vars associated with the flow
 * @deficit: number of remaining byte credits
 * @backlog: size of data in the flow
 * @qlen: number of packets in the flow
 * @flowchain: flowchain for the flow
 * @head: first packet in the flow
 * @tail: last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
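
/* Map an skb to a flow index in [1..flows_cnt]. A matching minor id in
 * skb->priority wins, then the optional tcf filter chain, then the
 * stochastic hash. A return value of 0 tells the caller to drop the packet.
 */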
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
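
/* Enqueue path: the PIE state of the selected flow decides between queuing
 * the packet, ECN-marking it (when drop probability is at or below ecn_prob
 * percent of MAX_PROB), or dropping it. skb->truesize is remembered in the
 * pie cb so the same amount can be subtracted from memory_usage at dequeue.
 */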
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than the parameter ecn_prob, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_CN;
}

static struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS] = {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET] = {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_FQ_PIE_BETA] = {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM] =
		NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32},
	[TCA_FQ_PIE_ECN] = {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
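
/* DRR dequeue: serve new_flows first, then old_flows. A flow whose deficit
 * is exhausted receives another quantum of byte credits and is rotated to
 * the tail of old_flows; an emptied flow is either rotated there once more
 * (to avoid starving old flows) or unlinked from the scheduling lists.
 */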
static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}
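
/* Configuration changes are applied under sch_tree_lock(). If the new limit
 * is lower than the current queue length, excess packets are dequeued and
 * freed, and the backlog change is reported to the parent via
 * qdisc_tree_reduce_backlog().
 */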
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		q->p_params.limit = limit;
		sch->limit = limit;
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		q->p_params.target =
			PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		q->p_params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE]));

	if (tb[TCA_FQ_PIE_ALPHA])
		q->p_params.alpha = nla_get_u32(tb[TCA_FQ_PIE_ALPHA]);

	if (tb[TCA_FQ_PIE_BETA])
		q->p_params.beta = nla_get_u32(tb[TCA_FQ_PIE_BETA]);

	if (tb[TCA_FQ_PIE_QUANTUM])
		q->quantum = nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]);

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		q->memory_limit = nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]);

	if (tb[TCA_FQ_PIE_ECN_PROB])
		q->ecn_prob = nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]);

	if (tb[TCA_FQ_PIE_ECN])
		q->p_params.ecn = nla_get_u32(tb[TCA_FQ_PIE_ECN]);

	if (tb[TCA_FQ_PIE_BYTEMODE])
		q->p_params.bytemode = nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]);

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		q->p_params.dq_rate_estimator =
			nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}
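
/* Periodic PIE probability recalculation. To bound the time spent under the
 * qdisc root lock, at most 2048 flows are updated per timer run; flows_cursor
 * remembers where to resume, and the timer only waits a full tupdate interval
 * once all flows have been covered.
 */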
static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}
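
/* Defaults set below: a limit of 10240 packets, 1024 flows, a 32 MB memory
 * limit, a quantum of one MTU-sized packet and an ECN marking threshold of
 * 10% drop probability; the adaptation timer first fires after 500 ms.
 */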
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);
		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, q->flows_cnt) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->p_params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(q->p_params.tupdate)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, q->p_params.alpha) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, q->p_params.beta) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT, q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, q->ecn_prob) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, q->p_params.ecn) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, q->p_params.bytemode) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			q->p_params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.overmemory = q->overmemory,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage = q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}
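
/* tupdate is zeroed before del_timer_sync() so a concurrently running
 * fq_pie_timer() cannot re-arm itself while the qdisc is being torn down.
 */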
static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");