// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <[email protected]>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>
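
/* GRO receive handler for ESP: parse the SPI/sequence number, attach the
 * matching xfrm state to the skb's sec_path when the crypto was not already
 * done in hardware, and hand the packet to xfrm_input(). Returning
 * ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb has been consumed.
 */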
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
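
/* Build the outer ESP header for GSO output: record ESP as the outer
 * protocol via the mac_header pointer set up by the xfrm output path,
 * fill in the SPI and the low 32 bits of the output sequence number,
 * and remember the inner protocol in the offload state.
 */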
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}
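
/* Tunnel mode: the ESP payload is a complete inner IPv4 or IPv6 packet, so
 * pick the ethertype from the inner mode family and let the generic
 * layer 2 GSO helper segment it.
 */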
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
                                                       : htons(ETH_P_IP);

        return skb_eth_gso_segment(skb, features, type);
}
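
/* Transport mode: skip the space reserved for the ESP header and hand the
 * packet to the GSO callback of the inner transport protocol.
 */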
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}
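
/* BEET mode: adjust the transport header for the optional pseudo header
 * (IPPROTO_BEETPH) or for IPv6 extension headers, set the matching gso_type
 * bits, and segment with the inner protocol's GSO callback.
 */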
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        if (proto == IPPROTO_IPV6)
                skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}
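
/* Dispatch GSO segmentation according to the outer encapsulation mode. */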
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}
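
/* GSO callback for IPPROTO_ESP: validate the offload state and SPI, pull the
 * ESP header and IV, mask out the features the device cannot provide when
 * ESP hardware offload is unavailable for this skb, and segment according to
 * the outer mode.
 */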
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
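
/* Finish ESP receive processing once the crypto layer is done: verify the
 * header is available, mark the checksum as unverified when the crypto was
 * done in software, and strip the ESP trailer via esp_input_done2().
 */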
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}
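
/* Transmit callback for offloaded ESP: compute padding and trailer sizes,
 * fill in the SPI and (possibly 64-bit extended) sequence number, and fix up
 * the IP header. If the device can handle ESP for this skb, only tag it for
 * hardware offload; otherwise fall back to software encryption via
 * esp_output_head()/esp_output_tail().
 */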
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        if (xo->seq.low < seq)
                xo->seq.hi++;

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        if (skb_needs_linearize(skb, skb->dev->features) &&
            __skb_linearize(skb))
                return -ENOMEM;

        return 0;
}
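
/* Registration glue: esp4_offload hooks the GRO/GSO callbacks above into the
 * IPv4 protocol offload table, and esp_type_offload provides the xfrm type
 * offload operations for ESP.
 */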
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");