// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct cls_mall_head {
        struct tcf_exts exts;
        struct tcf_result res;
        u32 handle;
        u32 flags;
        unsigned int in_hw_count;
        struct tc_matchall_pcnt __percpu *pf;
        struct rcu_work rwork;
        bool deleting;
};

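/* Matchall classifies every packet it sees: there is at most one filter
 * (the head) per classifier instance. On a hit we copy the cached result,
 * bump the per-CPU hit counter and run the attached actions. A head marked
 * skip_sw is matched only in hardware, so the software path reports a miss.
 */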
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
{
        struct cls_mall_head *head = rcu_dereference_bh(tp->root);

        if (unlikely(!head))
                return -1;

        if (tc_skip_sw(head->flags))
                return -1;

        *res = head->res;
        __this_cpu_inc(head->pf->rhit);
        return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
        return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
        tcf_exts_destroy(&head->exts);
        tcf_exts_put_net(&head->exts);
        free_percpu(head->pf);
        kfree(head);
}

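/* Final teardown runs from a workqueue (queued via tcf_queue_work), so it
 * happens after an RCU grace period and can safely take the RTNL lock.
 */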
static void mall_destroy_work(struct work_struct *work)
{
        struct cls_mall_head *head = container_of(to_rcu_work(work),
                                                  struct cls_mall_head,
                                                  rwork);
        rtnl_lock();
        __mall_destroy(head);
        rtnl_unlock();
}

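/* Ask the drivers bound to this block to remove the offloaded filter.
 * The head pointer doubles as the driver-visible cookie identifying it.
 */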
static void mall_destroy_hw_filter(struct tcf_proto *tp,
                                   struct cls_mall_head *head,
                                   unsigned long cookie,
                                   struct netlink_ext_ack *extack)
{
        struct tc_cls_matchall_offload cls_mall = {};
        struct tcf_block *block = tp->chain->block;

        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
        cls_mall.command = TC_CLSMATCHALL_DESTROY;
        cls_mall.cookie = cookie;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
                            &head->flags, &head->in_hw_count, true);
}

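/* Offload (or update) the filter in hardware: translate the tc actions
 * into a flow_rule, hand it to the block's driver callbacks, and track
 * how many devices accepted it in in_hw_count. For a skip_sw filter a
 * failure to install in hardware is fatal; otherwise the filter falls
 * back to the software datapath.
 */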
static int mall_replace_hw_filter(struct tcf_proto *tp,
                                  struct cls_mall_head *head,
                                  unsigned long cookie,
                                  struct netlink_ext_ack *extack)
{
        struct tc_cls_matchall_offload cls_mall = {};
        struct tcf_block *block = tp->chain->block;
        bool skip_sw = tc_skip_sw(head->flags);
        int err;

        cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
        if (!cls_mall.rule)
                return -ENOMEM;

        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
        cls_mall.command = TC_CLSMATCHALL_REPLACE;
        cls_mall.cookie = cookie;

        err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
                                      cls_mall.common.extack);
        if (err) {
                kfree(cls_mall.rule);
                mall_destroy_hw_filter(tp, head, cookie, NULL);

                return skip_sw ? err : 0;
        }

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
                              skip_sw, &head->flags, &head->in_hw_count, true);
        tc_cleanup_offload_action(&cls_mall.rule->action);
        kfree(cls_mall.rule);

        if (err) {
                mall_destroy_hw_filter(tp, head, cookie, NULL);
                return err;
        }

        if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

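/* Tear down the classifier instance: unbind from the class, remove any
 * hardware offload, then free the head. Freeing is deferred to the RCU
 * workqueue when a reference on the owning netns can still be taken;
 * otherwise the netns is going away and we destroy synchronously.
 */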
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
                         struct netlink_ext_ack *extack)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);

        if (!head)
                return;

        tcf_unbind_filter(tp, &head->res);

        if (!tc_skip_hw(head->flags))
                mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

        if (tcf_exts_get_net(&head->exts))
                tcf_queue_work(&head->rwork, mall_destroy_work);
        else
                __mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);

        if (head && head->handle == handle)
                return head;

        return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
        [TCA_MATCHALL_UNSPEC]   = { .type = NLA_UNSPEC },
        [TCA_MATCHALL_CLASSID]  = { .type = NLA_U32 },
        [TCA_MATCHALL_FLAGS]    = { .type = NLA_U32 },
};

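/* Create a new matchall filter from a netlink request. Since matchall
 * keeps a single filter per classifier instance, a second change request
 * on the same instance fails with -EEXIST rather than replacing the head.
 * The handle defaults to 1 when the user does not supply one.
 */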
static int mall_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       void **arg, u32 flags,
                       struct netlink_ext_ack *extack)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
        bool bound_to_filter = false;
        struct cls_mall_head *new;
        u32 userflags = 0;
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        if (head)
                return -EEXIST;

        err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
                                          tca[TCA_OPTIONS], mall_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_MATCHALL_FLAGS]) {
                userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
                if (!tc_flags_valid(userflags))
                        return -EINVAL;
        }

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOBUFS;

        err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
        if (err)
                goto err_exts_init;

        if (!handle)
                handle = 1;
        new->handle = handle;
        new->flags = userflags;
        new->pf = alloc_percpu(struct tc_matchall_pcnt);
        if (!new->pf) {
                err = -ENOMEM;
                goto err_alloc_percpu;
        }

        err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
                                   &new->exts, flags, new->flags, extack);
        if (err < 0)
                goto err_set_parms;

        if (tb[TCA_MATCHALL_CLASSID]) {
                new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
                tcf_bind_filter(tp, &new->res, base);
                bound_to_filter = true;
        }

        if (!tc_skip_hw(new->flags)) {
                err = mall_replace_hw_filter(tp, new, (unsigned long)new,
                                             extack);
                if (err)
                        goto err_replace_hw_filter;
        }

        if (!tc_in_hw(new->flags))
                new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        *arg = head;
        rcu_assign_pointer(tp->root, new);
        return 0;

err_replace_hw_filter:
        if (bound_to_filter)
                tcf_unbind_filter(tp, &new->res);
err_set_parms:
        free_percpu(new->pf);
err_alloc_percpu:
        tcf_exts_destroy(&new->exts);
err_exts_init:
        kfree(new);
        return err;
}

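/* Deleting the only filter empties the classifier: mark the head as going
 * away so walks skip it, and report *last so the core tears down the whole
 * tcf_proto, which in turn frees the head via mall_destroy().
 */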
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
                       bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);

        head->deleting = true;
        *last = true;
        return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                      bool rtnl_held)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);

        if (arg->count < arg->skip)
                goto skip;

        if (!head || head->deleting)
                return;
        if (arg->fn(tp, head, arg) < 0)
                arg->stop = 1;
skip:
        arg->count++;
}

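/* Replay the filter to a single block callback, e.g. when a device binds
 * to or unbinds from an existing block, so its hardware state catches up
 * with filters installed before it attached.
 */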
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                          void *cb_priv, struct netlink_ext_ack *extack)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct tc_cls_matchall_offload cls_mall = {};
        struct tcf_block *block = tp->chain->block;
        int err;

        if (tc_skip_hw(head->flags))
                return 0;

        cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
        if (!cls_mall.rule)
                return -ENOMEM;

        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
        cls_mall.command = add ?
                TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
        cls_mall.cookie = (unsigned long)head;

        err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
                                      cls_mall.common.extack);
        if (err) {
                kfree(cls_mall.rule);

                return add && tc_skip_sw(head->flags) ? err : 0;
        }

        err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
                                    &cls_mall, cb_priv, &head->flags,
                                    &head->in_hw_count);
        tc_cleanup_offload_action(&cls_mall.rule->action);
        kfree(cls_mall.rule);

        return err;
}

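/* Pull the latest byte/packet/drop counters from the drivers and fold
 * them into the action stats before dumping to user space.
 */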
static void mall_stats_hw_filter(struct tcf_proto *tp,
                                 struct cls_mall_head *head,
                                 unsigned long cookie)
{
        struct tc_cls_matchall_offload cls_mall = {};
        struct tcf_block *block = tp->chain->block;

        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
        cls_mall.command = TC_CLSMATCHALL_STATS;
        cls_mall.cookie = cookie;

        tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

        tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
                                 cls_mall.stats.pkts, cls_mall.stats.drops,
                                 cls_mall.stats.lastused,
                                 cls_mall.stats.used_hw_stats,
                                 cls_mall.stats.used_hw_stats_valid);
}

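/* Dump the filter back to user space: classid, flags, the software hit
 * counter aggregated across all CPUs, and the attached actions.
 */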
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
                     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct tc_matchall_pcnt gpf = {};
        struct cls_mall_head *head = fh;
        struct nlattr *nest;
        int cpu;

        if (!head)
                return skb->len;

        if (!tc_skip_hw(head->flags))
                mall_stats_hw_filter(tp, head, (unsigned long)head);

        t->tcm_handle = head->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        if (head->res.classid &&
            nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;

        if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
                goto nla_put_failure;

        for_each_possible_cpu(cpu) {
                struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

                gpf.rhit += pf->rhit;
        }

        if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
                          sizeof(struct tc_matchall_pcnt),
                          &gpf, TCA_MATCHALL_PAD))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
                            unsigned long base)
{
        struct cls_mall_head *head = fh;

        tc_cls_bind_class(classid, cl, q, &head->res, base);
}

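/* Typical usage from user space (illustrative; device name is an example):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall skip_sw action drop
 *
 * The second command exercises mall_change() and, because of skip_sw,
 * succeeds only if mall_replace_hw_filter() installs the rule in hardware.
 */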
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
        .kind           = "matchall",
        .classify       = mall_classify,
        .init           = mall_init,
        .destroy        = mall_destroy,
        .get            = mall_get,
        .change         = mall_change,
        .delete         = mall_delete,
        .walk           = mall_walk,
        .reoffload      = mall_reoffload,
        .dump           = mall_dump,
        .bind_class     = mall_bind_class,
        .owner          = THIS_MODULE,
};

static int __init cls_mall_init(void)
{
        return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
        unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");