// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <[email protected]>
 */

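/*
 * Deficit Round Robin: each active class is visited in round-robin
 * order and may send up to its configured quantum of bytes per round.
 * Unused allowance is carried over in the class's deficit counter, so
 * over time classes receive bandwidth in proportion to their quantums.
 */
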
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

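/*
 * Per-class state: the class sits on q->active (via alist) while its
 * leaf qdisc holds packets; quantum is the per-round byte allowance
 * and deficit the unused allowance carried into the current round.
 */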
struct drr_class {
        struct Qdisc_class_common       common;
        unsigned int                    filter_cnt;

        struct gnet_stats_basic_sync    bstats;
        struct gnet_stats_queue         qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct list_head                alist;
        struct Qdisc                    *qdisc;

        u32                             quantum;
        u32                             deficit;
};

struct drr_sched {
        struct list_head                active;
        struct tcf_proto __rcu          *filter_list;
        struct tcf_block                *block;
        struct Qdisc_class_hash         clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct drr_class, common);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
};

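/*
 * Create a new class or change an existing one. If no quantum is
 * given it defaults to the device MTU (psched_mtu()), i.e. roughly
 * one full-sized packet per round.
 */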
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg,
                            struct netlink_ext_ack *extack)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_DRR_MAX + 1];
        u32 quantum;
        int err;

        if (!opt) {
                NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
                                          extack);
        if (err < 0)
                return err;

        if (tb[TCA_DRR_QUANTUM]) {
                quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
                if (quantum == 0) {
                        NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
                        return -EINVAL;
                }
        } else
                quantum = psched_mtu(qdisc_dev(sch));

        if (cl != NULL) {
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    NULL, true,
                                                    tca[TCA_RATE]);
                        if (err) {
                                NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                                return err;
                        }
                }

                sch_tree_lock(sch);
                if (tb[TCA_DRR_QUANTUM])
                        cl->quantum = quantum;
                sch_tree_unlock(sch);

                return 0;
        }

        cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        gnet_stats_basic_sync_init(&cl->bstats);
        cl->common.classid = classid;
        cl->quantum = quantum;
        cl->qdisc = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, classid,
                                      NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
                qdisc_hash_add(cl->qdisc, true);

        if (tca[TCA_RATE]) {
                err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
                                            NULL, true, tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                        qdisc_put(cl->qdisc);
                        kfree(cl);
                        return err;
                }
        }

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
        gen_kill_estimator(&cl->rate_est);
        qdisc_put(cl->qdisc);
        kfree(cl);
}

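/* A class cannot be deleted while tc filters still reference it. */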
static int drr_delete_class(struct Qdisc *sch, unsigned long arg,
                            struct netlink_ext_ack *extack)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)arg;

        if (cl->filter_cnt > 0)
                return -EBUSY;

        sch_tree_lock(sch);

        qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);

        sch_tree_unlock(sch);

        drr_destroy_class(sch, cl);
        return 0;
}

static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
        return (unsigned long)drr_find_class(sch, classid);
}

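/* DRR only supports filters attached to the qdisc itself (classid 0),
 * not to individual classes.
 */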
static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
                                       struct netlink_ext_ack *extack)
{
        struct drr_sched *q = qdisc_priv(sch);

        if (cl) {
                NL_SET_ERR_MSG(extack, "DRR classid must be zero");
                return NULL;
        }

        return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        struct drr_class *cl = drr_find_class(sch, classid);

        if (cl != NULL)
                cl->filter_cnt++;

        return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
                           struct Qdisc *new, struct Qdisc **old,
                           struct netlink_ext_ack *extack)
{
        struct drr_class *cl = (struct drr_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                        cl->common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        *old = qdisc_replace(sch, new, &cl->qdisc);
        return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        return cl->qdisc;
}

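/* Called when a class's queue length drops to zero: take the class
 * off the active round-robin list so dequeue never visits an empty
 * class.
 */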
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct drr_class *cl = (struct drr_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info   = cl->qdisc->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
{
        struct drr_class *cl = (struct drr_class *)arg;
        __u32 qlen = qdisc_qlen_sum(cl->qdisc);
        struct Qdisc *cl_q = cl->qdisc;
        struct tc_drr_stats xstats;

        memset(&xstats, 0, sizeof(xstats));
        if (qlen)
                xstats.deficit = cl->deficit;

        if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
                                return;
                }
        }
}

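/* Classification order: if skb->priority carries this qdisc's major
 * handle and names an existing class, use that class directly;
 * otherwise fall back to the attached tc filters.
 */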
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
                cl = drr_find_class(sch, skb->priority);
                if (cl != NULL)
                        return cl;
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        fl = rcu_dereference_bh(q->filter_list);
        result = tcf_classify(skb, NULL, fl, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (struct drr_class *)res.class;
                if (cl == NULL)
                        cl = drr_find_class(sch, res.classid);
                return cl;
        }
        return NULL;
}

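/* Enqueue into the class's leaf qdisc. A previously empty class joins
 * the tail of the active list with a fresh deficit of one quantum.
 */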
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        int err = 0;
        bool first;

        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return err;
        }

        first = !cl->qdisc->q.qlen;
        err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
                }
                return err;
        }

        if (first) {
                list_add_tail(&cl->alist, &q->active);
                cl->deficit = cl->quantum;
        }

        sch->qstats.backlog += len;
        sch->q.qlen++;
        return err;
}

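/*
 * Core DRR loop: serve the class at the head of the active list. If
 * the head packet fits into the class's remaining deficit, charge its
 * length and hand it out; otherwise grant the class another quantum
 * and rotate it to the tail so the next class gets a turn.
 */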
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct sk_buff *skb;
        unsigned int len;

        if (list_empty(&q->active))
                goto out;
        while (1) {
                cl = list_first_entry(&q->active, struct drr_class, alist);
                skb = cl->qdisc->ops->peek(cl->qdisc);
                if (skb == NULL) {
                        qdisc_warn_nonwc(__func__, cl->qdisc);
                        goto out;
                }

                len = qdisc_pkt_len(skb);
                if (len <= cl->deficit) {
                        cl->deficit -= len;
                        skb = qdisc_dequeue_peeked(cl->qdisc);
                        if (unlikely(skb == NULL))
                                goto out;
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);

                        bstats_update(&cl->bstats, skb);
                        qdisc_bstats_update(sch, skb);
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }

                cl->deficit += cl->quantum;
                list_move_tail(&cl->alist, &q->active);
        }
out:
        return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
{
        struct drr_sched *q = qdisc_priv(sch);
        int err;

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
        INIT_LIST_HEAD(&q->active);
        return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen)
                                list_del(&cl->alist);
                        qdisc_reset(cl->qdisc);
                }
        }
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct hlist_node *next;
        unsigned int i;

        tcf_block_put(q->block);

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode)
                        drr_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
        .change         = drr_change_class,
        .delete         = drr_delete_class,
        .find           = drr_search_class,
        .tcf_block      = drr_tcf_block,
        .bind_tcf       = drr_bind_tcf,
        .unbind_tcf     = drr_unbind_tcf,
        .graft          = drr_graft_class,
        .leaf           = drr_class_leaf,
        .qlen_notify    = drr_qlen_notify,
        .dump           = drr_dump_class,
        .dump_stats     = drr_dump_class_stats,
        .walk           = drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
        .cl_ops         = &drr_class_ops,
        .id             = "drr",
        .priv_size      = sizeof(struct drr_sched),
        .enqueue        = drr_enqueue,
        .dequeue        = drr_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = drr_init_qdisc,
        .reset          = drr_reset_qdisc,
        .destroy        = drr_destroy_qdisc,
        .owner          = THIS_MODULE,
};

static int __init drr_init(void)
{
        return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
        unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
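
/*
 * Example configuration (a sketch using typical iproute2 syntax; the
 * device name, handles and quantum values are illustrative):
 *
 *   tc qdisc add dev eth0 root handle 1: drr
 *   tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *   tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip dport 80 0xffff flowid 1:1
 *
 * With these quantums, class 1:2 receives roughly twice the bandwidth
 * of class 1:1 under sustained load. Note that DRR has no default
 * class: packets that match no class are dropped by drr_enqueue().
 */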