ah6.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *	Kunihiro Ishiguro <[email protected]>
 *
 *	This file is derived from net/ipv4/ah.c.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>
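
/* Length of the fixed part of the IPv6 header that precedes the addresses
 * (version/class/flow label, payload length, next header, hop limit).
 */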
#define IPV6HDR_BASELEN 8
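
/* Scratch copy of the header fields that AH processing may rewrite while the
 * ICV is computed: the source address (only with MIP6, where a home address
 * option can swap it), the destination address (a routing header can rewrite
 * it) and any extension headers.  Restored once the digest is done.
 */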
struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	struct in6_addr saddr;
#endif
	struct in6_addr daddr;
	char hdrs[];
};

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
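
/* Allocate one scratch buffer per packet, laid out as
 * [saved header bytes][ICV][struct ahash_request + ctx][scatterlist[nfrags]],
 * with alignment padding between the parts.  The ah_tmp_*() and ah_req_sg()
 * helpers below return pointers into this buffer.
 */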
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline struct tmp_ext *ah_tmp_ext(void *base)
{
	return base + IPV6HDR_BASELEN;
}

static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
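
/* Zero the data of every TLV option whose type has the "may change en route"
 * bit (0x20) set, so that mutable options in a hop-by-hop or destination
 * options header do not contribute to the ICV.  Returns false if the option
 * area is malformed.
 */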
static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
	u8 *opt = (u8 *)opthdr;
	int len = ipv6_optlen(opthdr);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off + 1] + 2;
			if (len < optlen)
				goto bad;
			if (opt[off] & 0x20)
				memset(&opt[off + 2], 0, opt[off + 1]);
			break;
		}

		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return true;

bad:
	return false;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/**
 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
 * @iph: IPv6 header
 * @destopt: destination options header
 */
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
{
	u8 *opt = (u8 *)destopt;
	int len = ipv6_optlen(destopt);
	int off = 0;
	int optlen = 0;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off + 1] + 2;
			if (len < optlen)
				goto bad;

			/* Rearrange the source address in @iph and the
			 * addresses in the home address option so that they
			 * reflect the final source.  See 11.3.2 of RFC 3775
			 * for details.
			 */
			if (opt[off] == IPV6_TLV_HAO) {
				struct ipv6_destopt_hao *hao;

				hao = (struct ipv6_destopt_hao *)&opt[off];
				if (hao->length != sizeof(hao->addr)) {
					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
							     hao->length);
					goto bad;
				}
				swap(hao->addr, iph->saddr);
			}
			break;
		}

		off += optlen;
		len -= optlen;
	}

	/* Note: ok if len == 0 */
bad:
	return;
}
#else
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
#endif

/**
 * ipv6_rearrange_rthdr - rearrange IPv6 routing header
 * @iph: IPv6 header
 * @rthdr: routing header
 *
 * Rearrange the destination address in @iph and the addresses in @rthdr
 * so that they appear in the order they will at the final destination.
 * See Appendix A2 of RFC 2402 for details.
 */
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
	int segments, segments_left;
	struct in6_addr *addrs;
	struct in6_addr final_addr;

	segments_left = rthdr->segments_left;
	if (segments_left == 0)
		return;
	rthdr->segments_left = 0;

	/* The value of rthdr->hdrlen has been verified either by the system
	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
	 * packets.  So we can assume that it is even and that segments is
	 * greater than or equal to segments_left.
	 *
	 * For the same reason we can assume that this option is of type 0.
	 */
	segments = rthdr->hdrlen >> 1;

	addrs = ((struct rt0_hdr *)rthdr)->addr;
	final_addr = addrs[segments - 1];

	addrs += segments - segments_left;
	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));

	addrs[0] = iph->daddr;
	iph->daddr = final_addr;
}
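
/* Walk the extension headers in front of AH and put them into the form they
 * will have at the final destination: destination options are rearranged on
 * output, routing headers in both directions, and mutable option data is
 * zeroed.  Stops at the first header that is none of hop-by-hop, destination
 * options or routing.
 */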
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
{
	union {
		struct ipv6hdr *iph;
		struct ipv6_opt_hdr *opth;
		struct ipv6_rt_hdr *rth;
		char *raw;
	} exthdr = { .iph = iph };
	char *end = exthdr.raw + len;
	int nexthdr = iph->nexthdr;

	exthdr.iph++;

	while (exthdr.raw < end) {
		switch (nexthdr) {
		case NEXTHDR_DEST:
			if (dir == XFRM_POLICY_OUT)
				ipv6_rearrange_destopt(iph, exthdr.opth);
			fallthrough;
		case NEXTHDR_HOP:
			if (!zero_out_mutable_opts(exthdr.opth)) {
				net_dbg_ratelimited("overrun %sopts\n",
						    nexthdr == NEXTHDR_HOP ?
						    "hop" : "dest");
				return -EINVAL;
			}
			break;

		case NEXTHDR_ROUTING:
			ipv6_rearrange_rthdr(iph, exthdr.rth);
			break;

		default:
			return 0;
		}

		nexthdr = exthdr.opth->nexthdr;
		exthdr.raw += ipv6_optlen(exthdr.opth);
	}

	return 0;
}
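
/* Completion callback for asynchronous ICV computation on output: copy the
 * ICV into the AH header, restore the saved IPv6 header fields (and extension
 * headers, if any), free the scratch buffer and resume xfrm output.
 */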
static void ah6_output_done(struct crypto_async_request *base, int err)
{
	int extlen;
	u8 *iph_base;
	u8 *icv;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct ipv6hdr *top_iph = ipv6_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	struct tmp_ext *iph_ext;

	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	iph_base = AH_SKB_CB(skb)->tmp;
	iph_ext = ah_tmp_ext(iph_base);
	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb->sk, skb, err);
}
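
/* Output path: save the mutable parts of the IPv6 header, zero them, fill in
 * the AH header and compute the ICV over the whole packet (plus the high
 * sequence number bits when ESN is enabled), then write the ICV and restore
 * the saved fields.  May complete asynchronously via ah6_output_done().
 */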
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int extlen;
	u8 *iph_base;
	u8 *icv;
	u8 nexthdr;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	struct tmp_ext *iph_ext;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
				extlen + seqhi_len);
	if (!iph_base)
		goto out;

	iph_ext = ah_tmp_ext(iph_base);
	seqhi = (__be32 *)((char *)iph_ext + extlen);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	ah = ip_auth_hdr(skb);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ipv6_hdr(skb);
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(iph_ext, &top_iph->saddr, extlen);
#else
		memcpy(iph_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*iph_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto out_free;
	}

	ah->nexthdr = nexthdr;

	top_iph->priority = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit = 0;

	ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph_base;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

out_free:
	kfree(iph_base);
out:
	return err;
}
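
/* Completion callback for asynchronous ICV computation on input: compare the
 * computed ICV with the received one, restore the saved headers, strip the AH
 * header and resume xfrm input with the next header value (or an error).
 */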
static void ah6_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int hdr_len = skb_network_header_len(skb);
	int ah_hlen = ipv6_authlen(ah);

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, hdr_len);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before processing AH:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH:
	 * Keep a copy of the cleared headers.  After AH processing, advance
	 * skb->network_header by the AH header length using skb_pull, then
	 * copy the saved headers (hdr_len bytes) back.  If a destination
	 * options header follows AH, it ends up right after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * After processing there is a gap of the AH header's size in front of
	 * the IPv6 header.
	 */
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ip_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int nfrags;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	hdr_len = skb_network_header_len(skb);
	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = ipv6_authlen(ah);

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	ip6h = ipv6_hdr(skb);

	skb_push(skb, hdr_len);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, ip6h, hdr_len);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
	if (err)
		goto out_free;

	ip6h->priority = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit = 0;

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}

	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
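
/* ICMPv6 error handler for AH.  Only "packet too big" and redirect messages
 * are acted on, and only when the offending packet matches a known SA; they
 * trigger a PMTU update or a route redirect respectively.
 */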
static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
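
/* Set up an AH state: allocate the ahash transform for the configured
 * authentication algorithm, install the key, check the ICV sizes against the
 * xfrm algorithm description and compute the AH header length for the
 * selected mode.
 */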
static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg) {
		NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
		goto error;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
		goto error;
	}

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
	    crypto_ahash_digestsize(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len / 8;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
		goto error;
	}

	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}
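
/* No extra per-packet work is needed after AH input, so the protocol
 * callback simply reports success.
 */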
static int ah6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type ah6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah6_init_state,
	.destructor	= ah6_destroy,
	.input		= ah6_input,
	.output		= ah6_output,
};

static struct xfrm6_protocol ah6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah6_rcv_cb,
	.err_handler	= ah6_err,
	.priority	= 0,
};

static int __init ah6_init(void)
{
	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit ah6_fini(void)
{
	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	xfrm_unregister_type(&ah6_type, AF_INET6);
}

module_init(ah6_init);
module_exit(ah6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);