// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct ingress_sched_data {
        struct tcf_block *block;
        struct tcf_block_ext_info block_info;
        struct mini_Qdisc_pair miniqp;
};
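
/* ingress and clsact have no real classes; the class ops below are the
 * minimal stubs required so that tc filters can still be attached.
 */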
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}
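
/* Map any classid to a nonzero pseudo handle; 0 would mean "not found". */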
static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
        return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                         unsigned long parent, u32 classid)
{
        return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
                                           struct netlink_ext_ack *extack)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        return q->block;
}
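
/* Called when the head of a filter chain changes: swap the new chain head
 * into the active mini_Qdisc so the packet fast path picks it up.
 */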
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
        struct mini_Qdisc_pair *miniqp = priv;

        mini_qdisc_pair_swap(miniqp, tp_head);
}

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        return q->block_info.block_index;
}
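
/* Attach to the device ingress hook: initialize the mini_Qdisc pair used by
 * the RX path, then acquire the filter block (optionally shared via a
 * user-supplied block index).
 */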
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        struct ingress_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err;

        if (sch->parent != TC_H_INGRESS)
                return -EOPNOTSUPP;

        net_inc_ingress_queue();

        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

        q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        q->block_info.chain_head_change = clsact_chain_head_change;
        q->block_info.chain_head_change_priv = &q->miniqp;

        err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
        if (err)
                return err;

        mini_qdisc_pair_block_init(&q->miniqp, q->block);

        return 0;
}

static void ingress_destroy(struct Qdisc *sch)
{
        struct ingress_sched_data *q = qdisc_priv(sch);

        if (sch->parent != TC_H_INGRESS)
                return;

        tcf_block_put_ext(q->block, sch, &q->block_info);
        net_dec_ingress_queue();
}
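
/* ingress and clsact take no parameters; dump only an empty TCA_OPTIONS nest. */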
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
        .flags          = QDISC_CLASS_OPS_DOIT_UNLOCKED,
        .leaf           = ingress_leaf,
        .find           = ingress_find,
        .walk           = ingress_walk,
        .tcf_block      = ingress_tcf_block,
        .bind_tcf       = ingress_bind_filter,
        .unbind_tcf     = ingress_unbind_filter,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
        .cl_ops                 = &ingress_class_ops,
        .id                     = "ingress",
        .priv_size              = sizeof(struct ingress_sched_data),
        .static_flags           = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init                   = ingress_init,
        .destroy                = ingress_destroy,
        .dump                   = ingress_dump,
        .ingress_block_set      = ingress_ingress_block_set,
        .ingress_block_get      = ingress_ingress_block_get,
        .owner                  = THIS_MODULE,
};

struct clsact_sched_data {
        struct tcf_block *ingress_block;
        struct tcf_block *egress_block;
        struct tcf_block_ext_info ingress_block_info;
        struct tcf_block_ext_info egress_block_info;
        struct mini_Qdisc_pair miniqp_ingress;
        struct mini_Qdisc_pair miniqp_egress;
};
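
/* clsact exposes exactly two pseudo-classes, one for the ingress hook and one
 * for the egress hook; any other minor number is rejected.
 */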
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
        switch (TC_H_MIN(classid)) {
        case TC_H_MIN(TC_H_MIN_INGRESS):
        case TC_H_MIN(TC_H_MIN_EGRESS):
                return TC_H_MIN(classid);
        default:
                return 0;
        }
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
{
        return clsact_find(sch, classid);
}

static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
                                          struct netlink_ext_ack *extack)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        switch (cl) {
        case TC_H_MIN(TC_H_MIN_INGRESS):
                return q->ingress_block;
        case TC_H_MIN(TC_H_MIN_EGRESS):
                return q->egress_block;
        default:
                return NULL;
        }
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        return q->egress_block_info.block_index;
}
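
/* Like ingress_init(), but sets up a mini_Qdisc pair and filter block for
 * both the ingress and the egress hook of the device.
 */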
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct clsact_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err;

        if (sch->parent != TC_H_CLSACT)
                return -EOPNOTSUPP;

        net_inc_ingress_queue();
        net_inc_egress_queue();

        mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

        q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        q->ingress_block_info.chain_head_change = clsact_chain_head_change;
        q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

        err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
                                extack);
        if (err)
                return err;

        mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);

        mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

        q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
        q->egress_block_info.chain_head_change = clsact_chain_head_change;
        q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

        return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

static void clsact_destroy(struct Qdisc *sch)
{
        struct clsact_sched_data *q = qdisc_priv(sch);

        if (sch->parent != TC_H_CLSACT)
                return;

        tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
        tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

        net_dec_ingress_queue();
        net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
        .flags          = QDISC_CLASS_OPS_DOIT_UNLOCKED,
        .leaf           = ingress_leaf,
        .find           = clsact_find,
        .walk           = ingress_walk,
        .tcf_block      = clsact_tcf_block,
        .bind_tcf       = clsact_bind_filter,
        .unbind_tcf     = ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
        .cl_ops                 = &clsact_class_ops,
        .id                     = "clsact",
        .priv_size              = sizeof(struct clsact_sched_data),
        .static_flags           = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init                   = clsact_init,
        .destroy                = clsact_destroy,
        .dump                   = ingress_dump,
        .ingress_block_set      = clsact_ingress_block_set,
        .egress_block_set       = clsact_egress_block_set,
        .ingress_block_get      = clsact_ingress_block_get,
        .egress_block_get       = clsact_egress_block_get,
        .owner                  = THIS_MODULE,
};
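
/* Register both qdiscs; unwind the ingress registration if clsact fails. */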
static int __init ingress_module_init(void)
{
        int ret;

        ret = register_qdisc(&ingress_qdisc_ops);
        if (!ret) {
                ret = register_qdisc(&clsact_qdisc_ops);
                if (ret)
                        unregister_qdisc(&ingress_qdisc_ops);
        }

        return ret;
}

static void __exit ingress_module_exit(void)
{
        unregister_qdisc(&ingress_qdisc_ops);
        unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");