/* esp6_offload.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * IPV6 GSO/GRO offload support
  4. * Linux INET implementation
  5. *
  6. * Copyright (C) 2016 secunet Security Networks AG
  7. * Author: Steffen Klassert <[email protected]>
  8. *
  9. * ESP GRO support
  10. */
  11. #include <linux/skbuff.h>
  12. #include <linux/init.h>
  13. #include <net/protocol.h>
  14. #include <crypto/aead.h>
  15. #include <crypto/authenc.h>
  16. #include <linux/err.h>
  17. #include <linux/module.h>
  18. #include <net/gro.h>
  19. #include <net/ip.h>
  20. #include <net/xfrm.h>
  21. #include <net/esp.h>
  22. #include <linux/scatterlist.h>
  23. #include <linux/kernel.h>
  24. #include <linux/slab.h>
  25. #include <linux/spinlock.h>
  26. #include <net/ip6_route.h>
  27. #include <net/ipv6.h>
  28. #include <linux/icmpv6.h>
  29. static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
  30. {
  31. int off = sizeof(struct ipv6hdr);
  32. struct ipv6_opt_hdr *exthdr;
  33. if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
  34. return offsetof(struct ipv6hdr, nexthdr);
  35. while (off < nhlen) {
  36. exthdr = (void *)ipv6_hdr + off;
  37. if (exthdr->nexthdr == NEXTHDR_ESP)
  38. return off;
  39. off += ipv6_optlen(exthdr);
  40. }
  41. return 0;
  42. }
  43. static struct sk_buff *esp6_gro_receive(struct list_head *head,
  44. struct sk_buff *skb)
  45. {
  46. int offset = skb_gro_offset(skb);
  47. struct xfrm_offload *xo;
  48. struct xfrm_state *x;
  49. __be32 seq;
  50. __be32 spi;
  51. int nhoff;
  52. int err;
  53. if (!pskb_pull(skb, offset))
  54. return NULL;
  55. if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
  56. goto out;
  57. xo = xfrm_offload(skb);
  58. if (!xo || !(xo->flags & CRYPTO_DONE)) {
  59. struct sec_path *sp = secpath_set(skb);
  60. if (!sp)
  61. goto out;
  62. if (sp->len == XFRM_MAX_DEPTH)
  63. goto out_reset;
  64. x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
  65. (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
  66. spi, IPPROTO_ESP, AF_INET6);
  67. if (!x)
  68. goto out_reset;
  69. skb->mark = xfrm_smark_get(skb->mark, x);
  70. sp->xvec[sp->len++] = x;
  71. sp->olen++;
  72. xo = xfrm_offload(skb);
  73. if (!xo)
  74. goto out_reset;
  75. }
  76. xo->flags |= XFRM_GRO;
  77. nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
  78. if (!nhoff)
  79. goto out;
  80. IP6CB(skb)->nhoff = nhoff;
  81. XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
  82. XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
  83. XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
  84. XFRM_SPI_SKB_CB(skb)->seq = seq;
  85. /* We don't need to handle errors from xfrm_input, it does all
  86. * the error handling and frees the resources on error. */
  87. xfrm_input(skb, IPPROTO_ESP, spi, -2);
  88. return ERR_PTR(-EINPROGRESS);
  89. out_reset:
  90. secpath_reset(skb);
  91. out:
  92. skb_push(skb, offset);
  93. NAPI_GRO_CB(skb)->same_flow = 0;
  94. NAPI_GRO_CB(skb)->flush = 1;
  95. return NULL;
  96. }
/* Fill in the outer ESP header on a GSO skb before segmentation.
 *
 * Pushes the skb back to the network header, records the inner protocol
 * in the offload context and writes the SPI and the low 32 bits of the
 * output sequence number into the ESP header.
 */
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 proto = iph->nexthdr;

	/* network offset is negative here, so this pushes the data
	 * pointer back to the network header.
	 */
	skb_push(skb, -skb_network_offset(skb));

	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
		__be16 frag;

		/* Transport mode: ESP goes after any extension headers,
		 * so walk them to find the real payload protocol.
		 */
		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
	}

	esph = ip_esp_hdr(skb);
	/* Mark the preceding next-header byte as ESP. */
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	/* Remember the inner protocol for the segment/xmit steps. */
	xo->proto = proto;
}
  114. static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
  115. struct sk_buff *skb,
  116. netdev_features_t features)
  117. {
  118. __be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
  119. : htons(ETH_P_IPV6);
  120. return skb_eth_gso_segment(skb, features, type);
  121. }
  122. static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
  123. struct sk_buff *skb,
  124. netdev_features_t features)
  125. {
  126. const struct net_offload *ops;
  127. struct sk_buff *segs = ERR_PTR(-EINVAL);
  128. struct xfrm_offload *xo = xfrm_offload(skb);
  129. skb->transport_header += x->props.header_len;
  130. ops = rcu_dereference(inet6_offloads[xo->proto]);
  131. if (likely(ops && ops->callbacks.gso_segment))
  132. segs = ops->callbacks.gso_segment(skb, features);
  133. return segs;
  134. }
/* Segment a BEET-mode ESP GSO skb.
 *
 * BEET carries the transport payload with a reduced (or pseudo) inner
 * header, so the transport header offset must be adjusted according to
 * the inner address family and the optional BEET pseudo header before
 * the packet is handed to the inner protocol's gso_segment callback.
 */
static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	/* Step over the ESP header room added by the xfrm layer. */
	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		/* Inner family is IPv4: compensate for the outer IPv6
		 * header being larger than the inner IPv4 one.
		 */
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			/* Skip the BEET pseudo header; hdrlen is scaled
			 * by 8.
			 */
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		/* Inner IPv6: skip extension headers to the payload. */
		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
  170. static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
  171. struct sk_buff *skb,
  172. netdev_features_t features)
  173. {
  174. switch (x->outer_mode.encap) {
  175. case XFRM_MODE_TUNNEL:
  176. return xfrm6_tunnel_gso_segment(x, skb, features);
  177. case XFRM_MODE_TRANSPORT:
  178. return xfrm6_transport_gso_segment(x, skb, features);
  179. case XFRM_MODE_BEET:
  180. return xfrm6_beet_gso_segment(x, skb, features);
  181. }
  182. return ERR_PTR(-EOPNOTSUPP);
  183. }
/* GSO segmentation entry point for ESP packets.
 *
 * Validates the offload state, strips the ESP header plus IV, then
 * dispatches to the outer-mode specific segmentation helper with a
 * feature mask reduced according to what the device can do in hardware.
 */
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	/* The state to use is the innermost one on the secpath. */
	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	/* The SPI in the packet must match the state's SPI. */
	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	/* No usable hardware ESP offload for this state: drop SG and
	 * checksum features so the software path gets linear data.
	 * Hardware ESP without TX checksumming: drop only the checksum
	 * features.
	 */
	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}
  216. static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
  217. {
  218. struct crypto_aead *aead = x->data;
  219. struct xfrm_offload *xo = xfrm_offload(skb);
  220. if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
  221. return -EINVAL;
  222. if (!(xo->flags & CRYPTO_DONE))
  223. skb->ip_summed = CHECKSUM_NONE;
  224. return esp6_input_done2(skb, 0);
  225. }
/* Transmit-side ESP offload handler.
 *
 * Computes padding/trailer sizes, writes SPI and sequence number, fixes
 * up the IPv6 payload length, then either leaves the (still cleartext)
 * packet for hardware ESP offload or encrypts in software via
 * esp6_output_tail().  Returns 0 on success or a negative errno.
 */
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	/* Hardware offload requires NETIF_F_HW_ESP and a state bound to
	 * the egress device; otherwise fall back to software crypto.
	 */
	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	/* Pad to the AEAD block size (at least 4-byte aligned per the
	 * ESP spec); clen includes the 2 trailer bytes (pad len, proto).
	 */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	/* Software path (and non-GSO hardware path) needs the ESP
	 * trailer built up front.
	 */
	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		/* Advance the sequence number by the number of segments
		 * this skb will produce.
		 */
		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	/* Carry a 32-bit wraparound of the low word into the high word
	 * (extended sequence numbers).
	 */
	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	/* Payloads larger than IPV6_MAXPLEN use payload_len == 0. */
	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		/* Hardware encrypts later; make sure a sec_path extension
		 * exists so the driver callbacks can find the state.
		 */
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	return 0;
}
/* inet6 protocol offload hooks for ESP: GRO receive and GSO segment. */
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};
/* xfrm type-offload operations wiring ESP GSO/GRO into the xfrm stack. */
static const struct xfrm_type_offload esp6_type_offload = {
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp6_input_tail,
	.xmit = esp6_xmit,
	.encap = esp6_gso_encap,
};
  309. static int __init esp6_offload_init(void)
  310. {
  311. if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
  312. pr_info("%s: can't add xfrm type offload\n", __func__);
  313. return -EAGAIN;
  314. }
  315. return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
  316. }
/* Module exit: tear down the registrations made in esp6_offload_init(). */
static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}
module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
/* Allow autoloading when an ESP offload state is created for IPv6. */
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");