act_csum.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <[email protected]>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static struct tc_action_ops act_csum_ops;

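/*
 * Netlink "init" handler: parse TCA_CSUM_PARMS, create a new action
 * instance or update an existing one, and swap in a fresh parameter
 * block under the action lock so the fast path picks it up via RCU.
 */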
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available and writable in the
 * specified sk_buff. Return a pointer to the next layer if the checks
 * pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

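/*
 * The helpers below recompute one transport-layer checksum each. Most
 * of them zero the checksum field, sum the payload with csum_partial(),
 * fold the result (combining it with the pseudo-header where the
 * protocol requires one) and mark the skb CHECKSUM_NONE; SCTP instead
 * uses sctp_compute_cksum() (CRC32c). Each helper returns 1 on success
 * and 0 if the header could not be pulled or made writable.
 */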
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check:
	 * UDPLITE uses udph->len for another thing.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {
		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check:
	 * UDPLITE uses udph->len for another thing.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

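/*
 * Walk an IPv4 packet and recompute the checksums requested in
 * update_flags. Transport checksums are skipped for non-first fragments
 * (the L4 header is not present there); the IPv4 header checksum itself
 * is refreshed last via ip_send_check().
 */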
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

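/*
 * Scan a hop-by-hop options header for a jumbo payload option and, if
 * one is found, replace the payload length taken from the fixed IPv6
 * header with the 32-bit jumbogram length. Returns 0 only for a
 * malformed jumbo option; unknown options are skipped.
 */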
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

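/*
 * IPv6 counterpart of tcf_csum_ipv4(): walk the extension header chain
 * (bailing out on fragments) until a transport header is reached, then
 * recompute the checksum requested in update_flags for that protocol.
 */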
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

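/*
 * Fast-path action handler. Reads the current parameters under RCU,
 * unwraps any VLAN headers to reach the IP payload, recomputes the
 * requested checksums in place, and then restores the skb so later
 * actions still see the original headers. Packets that cannot be fixed
 * up are dropped.
 */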
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

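/*
 * Dump the action back to user space: index, reference counts, control
 * verdict and update_flags, plus the install/use timestamps.
 */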
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

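/*
 * Translate the action into a flow_action entry for hardware offload:
 * on a bind request it fills in FLOW_ACTION_CSUM plus the checksum
 * flags, otherwise it only reports the action id.
 */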
static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.get_fill_size  = tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};

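/*
 * Per-network-namespace registration so each netns gets its own table
 * of csum action instances.
 */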
static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_csum_ops.net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &act_csum_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);