esp4.c

// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
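/*
 * Rough layout of the buffer returned by esp_alloc_tmp(), as implied by
 * the esp_tmp_*() helpers below (alignment padding omitted):
 *
 *   [ extra (seqhi/esphoff, ESN only) ][ IV ][ aead_request + reqsize ][ scatterlist[nfrags] ]
 *
 * esp_tmp_extra(), esp_tmp_iv(), esp_tmp_req() and esp_req_sg() each
 * recompute their slice of this buffer from its start.
 */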
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
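/*
 * Note on the lookup above: for ESP-in-TCP encapsulation (TCP
 * encapsulation of ESP as described in RFC 8229) the kernel does not own
 * a socket of its own; it reuses the established userspace TCP
 * connection identified by encap_sport/encap_dport. The socket is cached
 * in x->encap_sk and released via RCU when the encapsulation ports
 * change.
 */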
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing. It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
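/*
 * With ESN the associated data that must be authenticated is
 * SPI || seq_hi || seq_lo, but only SPI and seq_lo exist in the packet.
 * esp_output_set_extra() therefore shifts the header 4 bytes towards the
 * front so that the three words become contiguous, saving the
 * overwritten word in extra->seqhi; esp_output_restore_header() undoes
 * the shift once encryption has completed.
 */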
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}
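/*
 * Resulting UDP encapsulation, as built above:
 *
 *   UDP_ENCAP_ESPINUDP:         [ UDP header ][ ESP header ... ]
 *   UDP_ENCAP_ESPINUDP_NON_IKE: [ UDP header ][ 8 zero bytes ][ ESP header ... ]
 *
 * The UDP checksum is left at zero and the outer protocol byte is
 * rewritten to IPPROTO_UDP.
 */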
#ifdef CONFIG_INET_ESPINTCP
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			    struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
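/*
 * esp_output_head() attaches the ESP trailer (TFC padding, padding, pad
 * length and next-header byte) and returns the number of fragments the
 * caller must map. Three paths are taken below: write the trailer into
 * existing tailroom, place it in a freshly refilled page fragment
 * (setting esp->inplace = false so encryption later uses a separate
 * destination scatterlist), or fall back to skb_cow_data().
 */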
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb_len_add(skb, tailen);
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
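/*
 * esp_output_tail() maps the packet into scatterlists, programs the AEAD
 * request (associated data = ESP header, plus the high sequence bits for
 * ESN) and encrypts. The 64-bit sequence number in esp->seqno is copied
 * into the trailing bytes of the IV buffer, which the transform's
 * built-in IV generator (e.g. seqiv) typically consumes as its seed.
 * -EINPROGRESS from crypto_aead_encrypt() means completion happens
 * asynchronously in esp_output_done().
 */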
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
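/*
 * Worked example for the trailer sizing in esp_output() below (assumed
 * numbers, not taken from this file): with a 100-byte payload, no TFC
 * padding and a 16-byte block size, clen = ALIGN(100 + 2, 16) = 112 and
 * plen = 112 - 100 = 12, i.e. 10 bytes of padding plus the pad-length
 * and next-header bytes; the ICV (alen) is appended after that, so
 * tailen = 12 + alen.
 */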
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
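/*
 * After decryption the packet ends with:
 *
 *   [ payload ][ padding ][ pad length ][ next header ][ ICV ]
 *
 * esp_remove_trailer() reads the two bytes in front of the ICV,
 * validates the advertised pad length against the payload, trims
 * padding + trailer bytes + ICV off the skb and returns the inner
 * protocol number (or a negative error).
 */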
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;
	ret = nexthdr[1];

out:
	return ret;
}
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
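/*
 * Inbound processing maps the whole packet starting at the ESP header:
 *
 *   [ SPI ][ seq_no ][ IV ][ ciphertext + trailer ][ ICV ]
 *
 * The header (plus, for ESN, the high sequence bits pushed in by
 * esp_input_set_header()) forms the associated data, and decryption is
 * done in place over IV + ciphertext + ICV. Header removal and NAT-T
 * fixups happen afterwards in esp_input_done2().
 */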
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
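/*
 * esp4_err() is the ICMP error handler for ESP: a "fragmentation needed"
 * destination-unreachable triggers a path MTU update for the matching
 * SA, an ICMP redirect updates the route, and everything else is
 * ignored.
 */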
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}
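/*
 * For non-AEAD configurations the cipher and authentication keys are
 * handed to the authenc/authencesn template as one blob, built below as:
 *
 *   [ RTA(CRYPTO_AUTHENC_KEYA_PARAM) { enckeylen } ][ auth key ][ enc key ]
 */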
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree_sensitive(key);

error:
	return err;
}
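/*
 * esp_init_state() advertises worst-case per-packet overhead to the
 * stack: header_len is the ESP header plus IV, plus the outer IP header
 * in tunnel mode and any UDP/TCP encapsulation header, while trailer_len
 * covers the worst-case block padding, the pad-length and next-header
 * bytes, and the ICV.
 */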
static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp4_rcv_cb,
	.err_handler	= esp4_err,
	.priority	= 0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp_type, AF_INET);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);