act_ipt.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * net/sched/act_ipt.c iptables target interface
  4. *
  5. *TODO: Add other tables. For now we only support the ipv4 table targets
  6. *
  7. * Copyright: Jamal Hadi Salim (2002-13)
  8. */
  9. #include <linux/types.h>
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include <linux/errno.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/rtnetlink.h>
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/slab.h>
  18. #include <net/netlink.h>
  19. #include <net/pkt_sched.h>
  20. #include <linux/tc_act/tc_ipt.h>
  21. #include <net/tc_act/tc_ipt.h>
  22. #include <linux/netfilter_ipv4/ip_tables.h>
/* Forward declarations - "ipt" and "xt" are two registrations of the
 * same implementation below.
 */
static struct tc_action_ops act_ipt_ops;
static struct tc_action_ops act_xt_ops;
  25. static int ipt_init_target(struct net *net, struct xt_entry_target *t,
  26. char *table, unsigned int hook)
  27. {
  28. struct xt_tgchk_param par;
  29. struct xt_target *target;
  30. struct ipt_entry e = {};
  31. int ret = 0;
  32. target = xt_request_find_target(AF_INET, t->u.user.name,
  33. t->u.user.revision);
  34. if (IS_ERR(target))
  35. return PTR_ERR(target);
  36. t->u.kernel.target = target;
  37. memset(&par, 0, sizeof(par));
  38. par.net = net;
  39. par.table = table;
  40. par.entryinfo = &e;
  41. par.target = target;
  42. par.targinfo = t->data;
  43. par.hook_mask = 1 << hook;
  44. par.family = NFPROTO_IPV4;
  45. ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
  46. if (ret < 0) {
  47. module_put(t->u.kernel.target->me);
  48. return ret;
  49. }
  50. return 0;
  51. }
  52. static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
  53. {
  54. struct xt_tgdtor_param par = {
  55. .target = t->u.kernel.target,
  56. .targinfo = t->data,
  57. .family = NFPROTO_IPV4,
  58. .net = net,
  59. };
  60. if (par.target->destroy != NULL)
  61. par.target->destroy(&par);
  62. module_put(par.target->me);
  63. }
  64. static void tcf_ipt_release(struct tc_action *a)
  65. {
  66. struct tcf_ipt *ipt = to_ipt(a);
  67. if (ipt->tcfi_t) {
  68. ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
  69. kfree(ipt->tcfi_t);
  70. }
  71. kfree(ipt->tcfi_tname);
  72. }
/* Netlink attribute policy for TCA_IPT_*.
 * The hook value is narrowed further (PRE/POST_ROUTING only) in
 * __tcf_ipt_init(); TCA_IPT_TARG only has a minimum length here - the
 * embedded target_size is re-checked against the attribute length at
 * init time.
 */
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= NLA_POLICY_RANGE(NLA_U32, NF_INET_PRE_ROUTING,
					   NF_INET_NUMHOOKS),
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};
/* Shared init path for the "ipt" and "xt" actions.
 *
 * Parses the TCA_IPT_* attributes, creates or looks up the action
 * instance by index, validates the requested hook/table, then binds a
 * validated xtables target to the action.
 *
 * Returns ACT_P_CREATED for a newly created instance, 0 when binding or
 * replacing an existing one, or a negative errno.
 */
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops,
			  struct tcf_proto *tp, u32 flags)
{
	struct tc_action_net *tn = net_generic(net, id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	/* err > 0 means an action with this index already exists */
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	/* hook and target are mandatory; unwind the idr state on error */
	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	/* the attribute payload must match the size the target claims,
	 * otherwise the later kmemdup() could copy out of bounds
	 */
	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		if (bind)/* dont override defaults */
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}

	/* only PRE_ROUTING and POST_ROUTING are supported by tcf_ipt_act() */
	err = -EINVAL;
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);
	switch (hook) {
	case NF_INET_PRE_ROUTING:
		break;
	case NF_INET_POST_ROUTING:
		break;
	default:
		goto err1;
	}

	if (tb[TCA_IPT_TABLE]) {
		/* mangle only for now */
		if (nla_strcmp(tb[TCA_IPT_TABLE], "mangle"))
			goto err1;
	}

	/* NOTE(review): allocation failures below return the stale -EINVAL
	 * set above rather than -ENOMEM - confirm whether that is intended.
	 */
	tname = kstrdup("mangle", GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	/* on replace, tear down the old target before installing the new
	 * one; the lock keeps tcf_ipt_act()/tcf_ipt_dump() consistent
	 */
	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t, net);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}
  184. static int tcf_ipt_init(struct net *net, struct nlattr *nla,
  185. struct nlattr *est, struct tc_action **a,
  186. struct tcf_proto *tp,
  187. u32 flags, struct netlink_ext_ack *extack)
  188. {
  189. return __tcf_ipt_init(net, act_ipt_ops.net_id, nla, est,
  190. a, &act_ipt_ops, tp, flags);
  191. }
  192. static int tcf_xt_init(struct net *net, struct nlattr *nla,
  193. struct nlattr *est, struct tc_action **a,
  194. struct tcf_proto *tp,
  195. u32 flags, struct netlink_ext_ack *extack)
  196. {
  197. return __tcf_ipt_init(net, act_xt_ops.net_id, nla, est,
  198. a, &act_xt_ops, tp, flags);
  199. }
  200. static bool tcf_ipt_act_check(struct sk_buff *skb)
  201. {
  202. const struct iphdr *iph;
  203. unsigned int nhoff, len;
  204. if (!pskb_may_pull(skb, sizeof(struct iphdr)))
  205. return false;
  206. nhoff = skb_network_offset(skb);
  207. iph = ip_hdr(skb);
  208. if (iph->ihl < 5 || iph->version != 4)
  209. return false;
  210. len = skb_ip_totlen(skb);
  211. if (skb->len < nhoff + len || len < (iph->ihl * 4u))
  212. return false;
  213. return pskb_may_pull(skb, iph->ihl * 4u);
  214. }
/* .act handler shared by "ipt" and "xt": run the configured xtables
 * target on an IPv4 skb and map the netfilter verdict to a TC action
 * code.  Non-IPv4, cloned-and-uncloneable, or malformed packets pass
 * through untouched as TC_ACT_UNSPEC.
 */
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_protocol(skb, false) != htons(ETH_P_IP))
		return TC_ACT_UNSPEC;

	/* targets may modify packet data; make sure we own it */
	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	if (!tcf_ipt_act_check(skb))
		return TC_ACT_UNSPEC;

	if (state.hook == NF_INET_POST_ROUTING) {
		/* POST_ROUTING targets expect a routed skb with a dst */
		if (!skb_dst(skb))
			return TC_ACT_UNSPEC;

		state.out = skb->dev;
	}

	/* serialize against replace (__tcf_ipt_init) and dump */
	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		/* unknown verdict: log (rate-limited) and accept */
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}

	spin_unlock(&ipt->tcf_lock);

	return result;
}
/* .dump handler: serialize the action's target blob, index, hook,
 * counters, table name and timestamps into a netlink message.  Returns
 * the message length on success, -1 (with the message trimmed back) on
 * failure.
 */
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * for foolproof you need to not assume this
	 */

	spin_lock_bh(&ipt->tcf_lock);
	/* snapshot under the lock; GFP_ATOMIC because we hold a spinlock.
	 * If the copy fails t is NULL and the failure path's kfree(t) is
	 * a harmless no-op.
	 */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	/* report the kernel target name in the user-visible name field */
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}
/* tc action ops for the "ipt" action; shares all handlers with "xt"
 * except .init, which selects this ops/net-id pair.
 */
static struct tc_action_ops act_ipt_ops = {
	.kind		= "ipt",
	.id		= TCA_ID_IPT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_ipt_init,
	.size		= sizeof(struct tcf_ipt),
};
  316. static __net_init int ipt_init_net(struct net *net)
  317. {
  318. struct tc_action_net *tn = net_generic(net, act_ipt_ops.net_id);
  319. return tc_action_net_init(net, tn, &act_ipt_ops);
  320. }
/* Pernet batched exit for "ipt": tear down the per-netns action tables. */
static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ipt_ops.net_id);
}
/* Pernet registration for the "ipt" action's per-netns state. */
static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &act_ipt_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
/* tc action ops for the "xt" alias; identical handlers to "ipt" except
 * .init, which selects this ops/net-id pair.
 */
static struct tc_action_ops act_xt_ops = {
	.kind		= "xt",
	.id		= TCA_ID_XT,
	.owner		= THIS_MODULE,
	.act		= tcf_ipt_act,
	.dump		= tcf_ipt_dump,
	.cleanup	= tcf_ipt_release,
	.init		= tcf_xt_init,
	.size		= sizeof(struct tcf_ipt),
};
  341. static __net_init int xt_init_net(struct net *net)
  342. {
  343. struct tc_action_net *tn = net_generic(net, act_xt_ops.net_id);
  344. return tc_action_net_init(net, tn, &act_xt_ops);
  345. }
/* Pernet batched exit for "xt": tear down the per-netns action tables. */
static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_xt_ops.net_id);
}
/* Pernet registration for the "xt" action's per-netns state. */
static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &act_xt_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
/* Module metadata; the "act_xt" alias lets this one module back both
 * action kinds.
 */
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");
  360. static int __init ipt_init_module(void)
  361. {
  362. int ret1, ret2;
  363. ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
  364. if (ret1 < 0)
  365. pr_err("Failed to load xt action\n");
  366. ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
  367. if (ret2 < 0)
  368. pr_err("Failed to load ipt action\n");
  369. if (ret1 < 0 && ret2 < 0) {
  370. return ret1;
  371. } else
  372. return 0;
  373. }
/* Module exit: unregister both actions and their pernet state. */
static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}
/* Wire the entry/exit functions into the module loader. */
module_init(ipt_init_module);
module_exit(ipt_cleanup_module);