nx-aes-ccm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <[email protected]>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

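/*
 * Load the AES key into both co-processor control blocks: the CCM block
 * used for the payload and the CCA block used for long associated data.
 * Only 128-bit keys are supported by this path; any other size is
 * rejected with -EINVAL.
 */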
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

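/*
 * RFC 4309 setkey: the last 3 bytes of the key material are the salt
 * (nonce), which is stashed in the context; the remainder is handed to
 * the plain CCM setkey above.
 */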
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

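/* CCM allows even tag lengths from 4 to 16 bytes (RFC 3610). */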
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

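/* RFC 4309 restricts the ICV length to 8, 12 or 16 bytes. */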
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

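/*
 * Flags byte layout of B0 (RFC 3610 / NIST SP 800-38C):
 *
 *   bit  7    : reserved (0)
 *   bit  6    : Adata, set when associated data is present
 *   bits 5..3 : M' = (authsize - 2) / 2
 *   bits 2..0 : L' = L - 1, already present in iv[0]
 *
 * e.g. authsize = 8, assoclen > 0 and L = 4 gives a flags byte of
 * 0x40 | (3 << 3) | 0x03 = 0x5b.
 */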
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	return set_msg_len(b0 + 16 - l, cryptlen, l);
}

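/*
 * Build the initial "PAT" (the in_pat_or_b0 block) for this request:
 * construct B0, and B1 when there is associated data, then run the AAD
 * through the co-processor so the payload pass can continue from the
 * resulting intermediate MAC, which is copied to *out.  Short AAD
 * (<= 14 bytes) is packed into B1 and fed through the CCM operation;
 * longer AAD goes to a separate CCA operation (op_aead) in chunks.
 */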
static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			unsigned int assoclen,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */
	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 GCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

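/*
 * Decrypt path: copy the received tag out of the source scatterlist,
 * regenerate the PAT from the AAD, run the ciphertext through the
 * co-processor in chunks bounded by the sg list limits, then compare the
 * computed MAC against the received tag; a mismatch returns -EBADMSG.
 */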
static int ccm_nx_decrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

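/*
 * Encrypt path: same chunked processing as above but with
 * NX_FDM_ENDE_ENCRYPT set; the MAC produced in out_pat_or_mac is
 * appended to the destination scatterlist behind the ciphertext.
 */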
static int ccm_nx_encrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

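/*
 * RFC 4309 counter block layout assembled below (L = 4, so iv[0] = L' = 3):
 *
 *   iv[0]      flags/L' byte (3)
 *   iv[1..3]   salt (nonce taken from the last 3 key bytes)
 *   iv[4..11]  per-request IV from req->iv
 *   iv[12..15] block counter, zeroed in generate_pat()
 *
 * The 8 trailing IV bytes counted in req->assoclen are passed here as
 * the IV instead, hence the "req->assoclen - 8".
 */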
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}

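/*
 * AEAD algorithm descriptors; left non-static so the NX driver core can
 * register them with the crypto API.
 */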
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};