// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <[email protected]>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD

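/* Transport mode: advance skb->data past the transport offset plus the
 * headers added for this state (x->props.header_len).
 */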
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

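/* Tunnel mode: advance skb->data past the MAC header plus the headers
 * added for this state (x->props.header_len).
 */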
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}

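/* BEET mode: as tunnel mode, but reduce the pull by the maximum BEET
 * pseudo header length for IPv4 selectors, adjusted when the outer
 * family is IPv6.
 */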
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        int phlen = 0;

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        if (x->sel.family != AF_INET6) {
                phlen = IPV4_BEET_PHMAXLEN;
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_BEET:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                break;
        }
}

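/* Return true if adding this GSO skb's segment count to the low 32 bits
 * of the sequence number would wrap.
 */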
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        __u32 seq = xo->seq.low;

        seq += skb_shinfo(skb)->gso_segs;
        if (unlikely(seq < xo->seq.low))
                return true;

        return false;
}

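/* Handle skbs with an offloaded xfrm state just before they reach the
 * driver: re-segment GSO packets that were rerouted or would overflow the
 * sequence number, run the ESP type offload xmit handler on each skb, and
 * signal the caller to retry (*again) while the per-CPU xfrm backlog is
 * not empty.
 */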
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features,
                                   bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct softnet_data *sd;
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct net_device *dev = skb->dev;
        struct sec_path *sp;

        if (!xo || (xo->flags & XFRM_XMIT))
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
                return skb;

        /* This skb was already validated on the upper/virtual dev */
        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
                                unlikely(xmit_xfrm_check_overflow(skb)))) {
                struct sk_buff *segs;

                /* Packet got rerouted, fixup features and segment it. */
                esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

                segs = skb_gso_segment(skb, esp_features);
                if (IS_ERR(segs)) {
                        kfree_skb(skb);
                        dev_core_stats_tx_dropped_inc(dev);
                        return NULL;
                } else {
                        consume_skb(skb);
                        skb = segs;
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb_list_walk_safe(skb, skb2, nskb) {
                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);
                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;
                        else
                                pskb->next = nskb;

                        continue;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));
                pskb = skb2;
        }

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

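/* Bind a new xfrm state to an offload-capable device: look the device up
 * by ifindex (or via a route lookup on the state's addresses), check its
 * xfrmdev_ops and ESN support, and call its xdo_dev_state_add() callback.
 */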
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo,
                       struct netlink_ext_ack *extack)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_dev_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload) {
                NL_SET_ERR_MSG(extack, "Type doesn't support offload");
                return -EINVAL;
        }

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad) {
                NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
                return -EINVAL;
        }

        if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
                NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
                return -EINVAL;
        }

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
        xso->real_dev = dev;

        if (xuo->flags & XFRM_OFFLOAD_INBOUND)
                xso->dir = XFRM_DEV_OFFLOAD_IN;
        else
                xso->dir = XFRM_DEV_OFFLOAD_OUT;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->dev = NULL;
                xso->dir = 0;
                xso->real_dev = NULL;
                netdev_put(dev, &xso->dev_tracker);

                if (err != -EOPNOTSUPP) {
                        NL_SET_ERR_MSG(extack, "Device failed to offload this state");
                        return err;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

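/* Decide whether this packet may take the offload path: the state must be
 * bound to the output device (or to no device), no further xfrm may be
 * stacked on the route, and the packet has to fit the cached MTU or
 * validate as GSO; the driver gets the final say via xdo_dev_offload_ok().
 */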
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
            (!xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

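/* Try to transmit the skb directly on its device; if the queue is frozen
 * or the transmit does not complete, queue the skb on the per-CPU xfrm
 * backlog and raise NET_TX_SOFTIRQ so it is retried later.
 */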
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

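/* Called from the TX softirq: splice the per-CPU xfrm backlog onto a
 * private list and resume transmission of each deferred skb.
 */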
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

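/* Sanity check a device's ESP offload advertisement: TX checksum offload
 * requires NETIF_F_HW_ESP, and NETIF_F_HW_ESP requires the state add and
 * delete callbacks; with XFRM offload compiled out, no ESP features are
 * allowed at all.
 */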
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        return NOTIFY_DONE;
}

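/* Netdevice notifier: re-validate offload capabilities on register and
 * feature changes, and flush offloaded states on down/unregister.
 */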
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_api_check(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_api_check(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}