// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Jiri Pirko <[email protected]>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
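
/*
 * Illustrative usage from user space via iproute2 (the exact command
 * syntax is an assumption and may vary between tc versions):
 *
 *	# classic BPF: bytecode as emitted by bpf_asm or tcpdump -ddd;
 *	# returning -1 (TC_ACT_UNSPEC) falls back to the default action
 *	tc actions add action bpf bytecode '1,6 0 0 4294967295' index 1
 *
 *	# eBPF: program loaded from an ELF object, attached at ingress
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall action bpf obj prog.o sec action
 */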

#define ACT_BPF_NAME_LEN	256
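
/*
 * Parsed action parameters: exactly one of the two program kinds is set,
 * either a classic BPF ops array (bpf_ops) or an eBPF program referenced
 * by file descriptor (bpf_name is its optional label).
 */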
struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static struct tc_action_ops act_bpf_ops;
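
/*
 * Per-packet hook: run the attached (c|e)BPF program on the skb and map
 * its return code to a TC verdict. At ingress the MAC header is pushed
 * back first, so the program sees the packet starting at the MAC header,
 * as it would on egress.
 */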
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
		       struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
	}
	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
		skb->mono_delivery_time = 0;

	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified by tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
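
/* Classic BPF always carries its ops array; eBPF never does. */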
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}
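
/* Dump the classic BPF instruction count and raw ops array to netlink. */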
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
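
/* Dump the eBPF program's name (if any), id and tag to netlink. */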
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
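
/*
 * .dump callback: serialize the action's parameters, program info and
 * timestamps into a netlink message; on failure, trim the partial output.
 */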
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}
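
/* Netlink attribute policy used to validate TCA_ACT_BPF_* attributes. */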
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) *
					   BPF_MAXINSNS },
};
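
/*
 * Build a classic BPF filter from the raw sock_filter array supplied over
 * netlink: validate the instruction count against the attribute length,
 * copy the ops and build a kernel bpf_prog via bpf_prog_create().
 */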
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
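
/*
 * Take a reference on an already-loaded eBPF program through the file
 * descriptor passed over netlink; the program must have been loaded with
 * type BPF_PROG_TYPE_SCHED_ACT.
 */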
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}
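
/*
 * Release whatever a tcf_bpf_cfg holds: drop the eBPF reference or destroy
 * the classic filter, then free the ops copy and the name string.
 */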
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}
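
/*
 * Snapshot the program state of an existing action into a tcf_bpf_cfg so
 * the old program can be released after a replace or on final cleanup.
 */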
static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* Updates to prog->filter are prevented, since this is called
	 * either with the tcf lock held or during final cleanup in the
	 * RCU callback.
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}
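
/*
 * .init callback: create a new action instance or replace an existing one.
 * Exactly one of classic ops (TCA_ACT_BPF_OPS/_OPS_LEN) and an eBPF fd
 * (TCA_ACT_BPF_FD) must be given. The new program is published with
 * rcu_assign_pointer(); on replace, the old one is torn down only after
 * synchronize_rcu() guarantees no CPU is still executing it.
 */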
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, flags);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if (is_bpf == is_ebpf) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res != ACT_P_CREATED) {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}
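
/* .cleanup callback: release the program and buffers held by the action. */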
static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.id		=	TCA_ID_BPF,
	.owner		=	THIS_MODULE,
	.act		=	tcf_bpf_act,
	.dump		=	tcf_bpf_dump,
	.cleanup	=	tcf_bpf_cleanup,
	.init		=	tcf_bpf_init,
	.size		=	sizeof(struct tcf_bpf),
};
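
/* Per-namespace setup/teardown of the action's index table (tc_action_net). */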
static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_bpf_ops.net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit_batch = bpf_exit_net,
	.id   = &act_bpf_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");