// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <[email protected]>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
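
/* Private data of the mq root qdisc: one pre-allocated child qdisc per TX
 * queue. The array is only held between init and attach; once the children
 * are grafted onto the device queues it is freed.
 */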
struct mq_sched {
	struct Qdisc		**qdiscs;
};

static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}

static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
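
/* Restored from the upstream kernel tree: the ops table at the bottom of
 * this file references mq_change_real_num_tx(), but its definition was
 * missing from this excerpt. When the driver changes the number of active
 * TX queues, add or remove the per-queue default qdiscs from the qdisc
 * hash so that only active queues are visible in dumps.
 */
static void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		/* Only update the default qdiscs we created,
		 * NULL qdisc only happens when sch is last grafted
		 */
		if (qdisc)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		if (qdisc)
			qdisc_hash_add(qdisc, false);
	}
#endif
}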

static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}
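
/* Class IDs map 1:1 onto TX queues: minor number n addresses queue n - 1,
 * so the valid classes are 1 .. dev->num_tx_queues.
 */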
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
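
/* Grafting replaces a child qdisc under a running device, so TX is quiesced
 * with dev_deactivate()/dev_activate() around the switch; the offload helper
 * then tells the driver which child handle now backs the queue.
 */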
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return rtnl_dereference(dev_queue->qdisc_sleeping);
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = rtnl_dereference(dev_queue->qdisc_sleeping);
	if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};