ipsec.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS	160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
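/* Note: IXGBE_IPSEC_KEY_BITS is compared against xfrm's alg_key_len
 * (in bits) in ixgbevf_ipsec_parse_proto_keys() below to decide whether
 * a 4-byte salt follows the 128-bit AES key in the alg_key blob.
 */
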
/**
 * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
 * @adapter: board private structure
 * @xs: xfrm info to be sent to the PF
 *
 * Returns: positive offload handle from the PF, or negative error code
 **/
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
				   struct xfrm_state *xs)
{
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	struct sa_mbx_msg *sam;
	int ret;

	/* send the important bits to the PF */
	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	sam->dir = xs->xso.dir;
	sam->spi = xs->id.spi;
	sam->proto = xs->id.proto;
	sam->family = xs->props.family;

	if (xs->props.family == AF_INET6)
		memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
	else
		memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

	msgbuf[0] = IXGBE_VF_IPSEC_ADD;

	spin_lock_bh(&adapter->mbx_lock);

	ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
	if (ret)
		goto out;

	ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (ret)
		goto out;

	ret = (int)msgbuf[1];
	if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
		ret = -1;

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return ret;
}
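/* Note on the mailbox exchange above and in the delete path below: word 0
 * of the reply echoes the request type, with IXGBE_VT_MSGTYPE_FAILURE set
 * if the PF rejected it, and for an ADD word 1 carries either the PF's SA
 * lookup index ("pfsa") or a negative error code.
 */
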
/**
 * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
 * @adapter: board private structure
 * @pfsa: sa index returned from PF when created, -1 for all
 *
 * Returns: 0 on success, or negative error code
 **/
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msgbuf[2];
	int err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_IPSEC_DEL;
	msgbuf[1] = (u32)pfsa;

	spin_lock_bh(&adapter->mbx_lock);

	err = ixgbevf_write_mbx(hw, msgbuf, 2);
	if (err)
		goto out;

	err = ixgbevf_poll_mbx(hw, msgbuf, 2);

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return err;
}
/**
 * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset. While we're here, make sure any stale VF data is
 * removed, since we go through reset when num_vfs changes.
 **/
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct net_device *netdev = adapter->netdev;
	int i;

	if (!(adapter->netdev->features & NETIF_F_HW_ESP))
		return;

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];
		int ret;

		if (r->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
			if (ret < 0)
				netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
					   i, ret);
		}

		if (t->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
			if (ret < 0)
				netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
					   i, ret);
		}
	}
}
/**
 * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}
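/* Note: Rx SAs are additionally hashed into ipsec->rx_sa_list keyed on the
 * raw big-endian SPI, so the hot Rx path below can find a state without
 * scanning the whole table; hash collisions are resolved by also matching
 * the destination address and protocol.
 */
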
/**
 * ixgbevf_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
					       __be32 *daddr, u8 proto,
					       __be32 spi, bool ip4)
{
	struct xfrm_state *ret = NULL;
	struct rx_sa *rsa;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
/**
 * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables. The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
					  u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.real_dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a big endian array of bytes, so
	 * we don't need to do any byte swapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len > IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = 0;
	} else {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
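/* Worked example of the split above: an alg_key longer than 160 bits is
 * treated as 16 cipher-key bytes followed by the salt, so mykey gets
 * bytes 0..15 and mysalt is read from bytes 16..19 (u32 word 4); at
 * exactly 160 bits the salt is taken to be zero.
 */
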
/**
 * ixgbevf_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;
	int ret;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		netdev_err(dev, "Unsupported mode for ipsec offload\n");
		return -EINVAL;
	}

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		rsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		tsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	return 0;
}
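/* Note: offload_handle is the local table slot biased by
 * IXGBE_IPSEC_BASE_RX_INDEX or IXGBE_IPSEC_BASE_TX_INDEX, so the delete
 * and Tx paths below recover sa_idx by subtracting the same base.
 */
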
/**
 * ixgbevf_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;

		if (!ipsec->rx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
		hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
		memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}
}
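/* Note for the check below: ihl counts IPv4 header length in 32-bit
 * words, so ihl == 5 is the bare 20-byte header; anything larger means
 * IP options are present, which the offload can't handle.
 */
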
/**
 * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
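/* These ops are attached to netdev->xfrmdev_ops in
 * ixgbevf_init_ipsec_offload() below; the xfrm stack calls the add/delete
 * hooks as SAs come and go, and consults xdo_dev_offload_ok per packet.
 */
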
/**
 * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;
	u16 sa_idx;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, sa_idx);
		return 0;
	}

	itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding. This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}

	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
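/* Worked example of the trailer math above: with the 16-byte ICV, the
 * pad-length byte sits at skb->len - (16 + 2); if it reads, say, 3, then
 * trailer_len = 16 + 2 + 3 = 21 bytes of ICV, ESP trailer fields, and
 * padding.
 */
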
/**
 * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * current VLAN code doesn't support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
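/* Note: the init below only proceeds on mailbox API 1.4 or 1.5,
 * presumably because older PF mailbox APIs don't understand the
 * IXGBE_VF_IPSEC_ADD/DEL messages; otherwise IPsec offload is quietly
 * left disabled.
 */
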
/**
 * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
 * @adapter: board private structure
 **/
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec;
	size_t size;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;

#define IXGBEVF_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
}
/**
 * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}