/* net/sched/sch_multiq.c — multiqueue packet scheduler */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2008, Intel Corporation.
  4. *
  5. * Author: Alexander Duyck <[email protected]>
  6. */
  7. #include <linux/module.h>
  8. #include <linux/slab.h>
  9. #include <linux/types.h>
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include <linux/errno.h>
  13. #include <linux/skbuff.h>
  14. #include <net/netlink.h>
  15. #include <net/pkt_sched.h>
  16. #include <net/pkt_cls.h>
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;		/* number of bands currently active (== real_num_tx_queues) */
	u16 max_bands;		/* capacity of queues[] (== num_tx_queues at init time) */
	u16 curband;		/* round-robin cursor: last band serviced by dequeue */
	struct tcf_proto __rcu *filter_list;	/* classifier chain (RCU-protected) */
	struct tcf_block *block;		/* tcf block backing filter_list */
	struct Qdisc **queues;	/* one child qdisc per band; unused slots hold &noop_qdisc */
};
  25. static struct Qdisc *
  26. multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
  27. {
  28. struct multiq_sched_data *q = qdisc_priv(sch);
  29. u32 band;
  30. struct tcf_result res;
  31. struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
  32. int err;
  33. *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
  34. err = tcf_classify(skb, NULL, fl, &res, false);
  35. #ifdef CONFIG_NET_CLS_ACT
  36. switch (err) {
  37. case TC_ACT_STOLEN:
  38. case TC_ACT_QUEUED:
  39. case TC_ACT_TRAP:
  40. *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
  41. fallthrough;
  42. case TC_ACT_SHOT:
  43. return NULL;
  44. }
  45. #endif
  46. band = skb_get_queue_mapping(skb);
  47. if (band >= q->bands)
  48. return q->queues[0];
  49. return q->queues[band];
  50. }
  51. static int
  52. multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  53. struct sk_buff **to_free)
  54. {
  55. struct Qdisc *qdisc;
  56. int ret;
  57. qdisc = multiq_classify(skb, sch, &ret);
  58. #ifdef CONFIG_NET_CLS_ACT
  59. if (qdisc == NULL) {
  60. if (ret & __NET_XMIT_BYPASS)
  61. qdisc_qstats_drop(sch);
  62. __qdisc_drop(skb, to_free);
  63. return ret;
  64. }
  65. #endif
  66. ret = qdisc_enqueue(skb, qdisc, to_free);
  67. if (ret == NET_XMIT_SUCCESS) {
  68. sch->q.qlen++;
  69. return NET_XMIT_SUCCESS;
  70. }
  71. if (net_xmit_drop_count(ret))
  72. qdisc_qstats_drop(sch);
  73. return ret;
  74. }
/* Round-robin dequeue: advance the persistent cursor (q->curband) one band
 * per attempt, skipping bands whose hardware TX queue is stopped, and return
 * the first skb found.  At most q->bands bands are tried per call.
 * NOTE(review): the cursor advances even on empty bands, which is what
 * spreads service across bands between calls — do not hoist the increment.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}
  102. static struct sk_buff *multiq_peek(struct Qdisc *sch)
  103. {
  104. struct multiq_sched_data *q = qdisc_priv(sch);
  105. unsigned int curband = q->curband;
  106. struct Qdisc *qdisc;
  107. struct sk_buff *skb;
  108. int band;
  109. for (band = 0; band < q->bands; band++) {
  110. /* cycle through bands to ensure fairness */
  111. curband++;
  112. if (curband >= q->bands)
  113. curband = 0;
  114. /* Check that target subqueue is available before
  115. * pulling an skb to avoid head-of-line blocking.
  116. */
  117. if (!netif_xmit_stopped(
  118. netdev_get_tx_queue(qdisc_dev(sch), curband))) {
  119. qdisc = q->queues[curband];
  120. skb = qdisc->ops->peek(qdisc);
  121. if (skb)
  122. return skb;
  123. }
  124. }
  125. return NULL;
  126. }
  127. static void
  128. multiq_reset(struct Qdisc *sch)
  129. {
  130. u16 band;
  131. struct multiq_sched_data *q = qdisc_priv(sch);
  132. for (band = 0; band < q->bands; band++)
  133. qdisc_reset(q->queues[band]);
  134. q->curband = 0;
  135. }
  136. static void
  137. multiq_destroy(struct Qdisc *sch)
  138. {
  139. int band;
  140. struct multiq_sched_data *q = qdisc_priv(sch);
  141. tcf_block_put(q->block);
  142. for (band = 0; band < q->bands; band++)
  143. qdisc_put(q->queues[band]);
  144. kfree(q->queues);
  145. }
  146. static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
  147. struct netlink_ext_ack *extack)
  148. {
  149. struct multiq_sched_data *q = qdisc_priv(sch);
  150. struct tc_multiq_qopt *qopt;
  151. struct Qdisc **removed;
  152. int i, n_removed = 0;
  153. if (!netif_is_multiqueue(qdisc_dev(sch)))
  154. return -EOPNOTSUPP;
  155. if (nla_len(opt) < sizeof(*qopt))
  156. return -EINVAL;
  157. qopt = nla_data(opt);
  158. qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
  159. removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
  160. GFP_KERNEL);
  161. if (!removed)
  162. return -ENOMEM;
  163. sch_tree_lock(sch);
  164. q->bands = qopt->bands;
  165. for (i = q->bands; i < q->max_bands; i++) {
  166. if (q->queues[i] != &noop_qdisc) {
  167. struct Qdisc *child = q->queues[i];
  168. q->queues[i] = &noop_qdisc;
  169. qdisc_purge_queue(child);
  170. removed[n_removed++] = child;
  171. }
  172. }
  173. sch_tree_unlock(sch);
  174. for (i = 0; i < n_removed; i++)
  175. qdisc_put(removed[i]);
  176. kfree(removed);
  177. for (i = 0; i < q->bands; i++) {
  178. if (q->queues[i] == &noop_qdisc) {
  179. struct Qdisc *child, *old;
  180. child = qdisc_create_dflt(sch->dev_queue,
  181. &pfifo_qdisc_ops,
  182. TC_H_MAKE(sch->handle,
  183. i + 1), extack);
  184. if (child) {
  185. sch_tree_lock(sch);
  186. old = q->queues[i];
  187. q->queues[i] = child;
  188. if (child != &noop_qdisc)
  189. qdisc_hash_add(child, true);
  190. if (old != &noop_qdisc)
  191. qdisc_purge_queue(old);
  192. sch_tree_unlock(sch);
  193. qdisc_put(old);
  194. }
  195. }
  196. }
  197. return 0;
  198. }
  199. static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
  200. struct netlink_ext_ack *extack)
  201. {
  202. struct multiq_sched_data *q = qdisc_priv(sch);
  203. int i, err;
  204. q->queues = NULL;
  205. if (!opt)
  206. return -EINVAL;
  207. err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
  208. if (err)
  209. return err;
  210. q->max_bands = qdisc_dev(sch)->num_tx_queues;
  211. q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
  212. if (!q->queues)
  213. return -ENOBUFS;
  214. for (i = 0; i < q->max_bands; i++)
  215. q->queues[i] = &noop_qdisc;
  216. return multiq_tune(sch, opt, extack);
  217. }
  218. static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
  219. {
  220. struct multiq_sched_data *q = qdisc_priv(sch);
  221. unsigned char *b = skb_tail_pointer(skb);
  222. struct tc_multiq_qopt opt;
  223. opt.bands = q->bands;
  224. opt.max_bands = q->max_bands;
  225. if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
  226. goto nla_put_failure;
  227. return skb->len;
  228. nla_put_failure:
  229. nlmsg_trim(skb, b);
  230. return -1;
  231. }
  232. static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
  233. struct Qdisc **old, struct netlink_ext_ack *extack)
  234. {
  235. struct multiq_sched_data *q = qdisc_priv(sch);
  236. unsigned long band = arg - 1;
  237. if (new == NULL)
  238. new = &noop_qdisc;
  239. *old = qdisc_replace(sch, new, &q->queues[band]);
  240. return 0;
  241. }
  242. static struct Qdisc *
  243. multiq_leaf(struct Qdisc *sch, unsigned long arg)
  244. {
  245. struct multiq_sched_data *q = qdisc_priv(sch);
  246. unsigned long band = arg - 1;
  247. return q->queues[band];
  248. }
  249. static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
  250. {
  251. struct multiq_sched_data *q = qdisc_priv(sch);
  252. unsigned long band = TC_H_MIN(classid);
  253. if (band - 1 >= q->bands)
  254. return 0;
  255. return band;
  256. }
  257. static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
  258. u32 classid)
  259. {
  260. return multiq_find(sch, classid);
  261. }
/* Unbind hook: nothing to release, bind_tcf took no reference. */
static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}
  265. static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
  266. struct sk_buff *skb, struct tcmsg *tcm)
  267. {
  268. struct multiq_sched_data *q = qdisc_priv(sch);
  269. tcm->tcm_handle |= TC_H_MIN(cl);
  270. tcm->tcm_info = q->queues[cl - 1]->handle;
  271. return 0;
  272. }
  273. static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
  274. struct gnet_dump *d)
  275. {
  276. struct multiq_sched_data *q = qdisc_priv(sch);
  277. struct Qdisc *cl_q;
  278. cl_q = q->queues[cl - 1];
  279. if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
  280. qdisc_qstats_copy(d, cl_q) < 0)
  281. return -1;
  282. return 0;
  283. }
  284. static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
  285. {
  286. struct multiq_sched_data *q = qdisc_priv(sch);
  287. int band;
  288. if (arg->stop)
  289. return;
  290. for (band = 0; band < q->bands; band++) {
  291. if (!tc_qdisc_stats_dump(sch, band + 1, arg))
  292. break;
  293. }
  294. }
  295. static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
  296. struct netlink_ext_ack *extack)
  297. {
  298. struct multiq_sched_data *q = qdisc_priv(sch);
  299. if (cl)
  300. return NULL;
  301. return q->block;
  302. }
/* Class operations: each band is exposed as one class (handle = band + 1). */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.find		= multiq_find,
	.walk		= multiq_walk,
	.tcf_block	= multiq_tcf_block,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_unbind,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};
/* Top-level qdisc operations for the "multiq" scheduler. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,	/* runtime reconfiguration reuses tune */
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};
/* Module entry: register the "multiq" qdisc with the scheduler core. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
/* Module exit: unregister the qdisc type. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
/* Module plumbing. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");