// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <[email protected]>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

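/*
 * Program the AES key into both coprocessor parameter blocks: the GCM CPB
 * used for the encrypt/decrypt pass and the GCA CPB used to hash the
 * associated data. The per-key-size properties entry (nx_ctx->ap) also
 * supplies the sg and data-length limits used when chunking requests below.
 */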
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8 *in_key,
                              unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        case AES_KEYSIZE_192:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
                break;
        case AES_KEYSIZE_256:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
        memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

        return 0;
}

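/*
 * RFC 4106 keys carry a 4-byte salt (nonce) appended to the AES key: strip
 * it off, program the AES key as usual and keep the salt for per-request IV
 * construction.
 */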
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8 *in_key,
                                  unsigned int key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
        char *nonce = nx_ctx->priv.gcm.nonce;
        int rc;

        if (key_len < 4)
                return -EINVAL;

        key_len -= 4;

        rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
        if (rc)
                goto out;

        memcpy(nonce, in_key + key_len, 4);
out:
        return rc;
}

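/* RFC 4106 only allows 8-, 12- or 16-byte ICVs. */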
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

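/*
 * Hash the associated data with the GCA coprocessor function, feeding it in
 * chunks bounded by the sg list and data-length limits and carrying the
 * partial PAT between h-calls. AAD that fits in a single AES block is simply
 * copied into @out and left for the GCM operation itself to absorb.
 */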
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
                  struct aead_request *req,
                  u8 *out,
                  unsigned int assoclen)
{
        int rc;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_ctx->in_sg;
        unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        if (nbytes <= AES_BLOCK_SIZE) {
                scatterwalk_start(&walk, req->src);
                scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
                return 0;
        }

        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
                       csbcpb_aead->cpb.aes_gca.out_pat,
                       AES_BLOCK_SIZE);
                NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

        return rc;
}

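/*
 * Handle the GMAC case (associated data only, no payload) by temporarily
 * switching the CPB to AES GMAC mode. The AAD is walked in the same chunked
 * fashion as nx_gca(), carrying the intermediate PAT and S0 values between
 * h-calls, and GCM mode is restored before returning.
 */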
static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *nx_sg;
        unsigned int nbytes = assoclen;
        unsigned int processed = 0, to_process;
        unsigned int max_sg_len;

        /* Set GMAC mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

        /* page_limit: number of sg entries that fit on one page */
        max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
                           nx_ctx->ap->sglen);
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* Copy IV */
        memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);

        do {
                /*
                 * to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = min_t(u64, nbytes - processed,
                                   nx_ctx->ap->databytelen);
                to_process = min_t(u64, to_process,
                                   NX_PAGE_SIZE * (max_sg_len - 1));

                nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
                                          req->src, processed, &to_process);

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
                                        * sizeof(struct nx_sg);

                csbcpb->cpb.aes_gcm.bit_length_data = 0;
                csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
        return rc;
}

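/*
 * Zero-length plaintext with no associated data: per the workbook reference
 * below (nx_wb 4.8.1.3), the tag reduces to the encryption of the initial
 * counter block, so run a single AES ECB operation on the IV and stash the
 * result where the tag copy-out in gcm_aes_nx_crypt() expects it.
 */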
static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
        int rc;
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        char out[AES_BLOCK_SIZE];
        struct nx_sg *in_sg, *out_sg;
        int len;

        /* For scenarios where the input message is zero length, AES CTR mode
         * may be used. Set the source data to be a single block (16B) of all
         * zeros, and set the input IV value to be the same as the GMAC IV
         * value. - nx_wb 4.8.1.3 */

        /* Change to ECB mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
        memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
               sizeof(csbcpb->cpb.aes_ecb.key));
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

        len = AES_BLOCK_SIZE;

        /* Encrypt the counter/IV */
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
                                 &len, nx_ctx->ap->sglen);

        if (len != AES_BLOCK_SIZE)
                return -EINVAL;

        len = sizeof(out);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
                                  nx_ctx->ap->sglen);

        if (len != sizeof(out))
                return -EINVAL;

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;
        atomic_inc(&(nx_ctx->stats->aes_ops));

        /* Copy out the auth tag */
        memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
               crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
        /* Restore GCM mode */
        csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

        /*
         * The ECB key occupies the same CPB region as the GCM AAD and
         * counter, so it's safe to just fill it with zeroes.
         */
        memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

        return rc;
}

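/*
 * Main GCM work loop. The associated data is hashed first (nx_gca(), or
 * gmac()/gcm_empty() when there is no payload), then the payload is pushed
 * through the coprocessor in sg-limited chunks, carrying the counter, PAT
 * and S0 state across iterations. On encrypt the tag is appended to the
 * destination; on decrypt it is compared against the tag found at the end
 * of the source.
 */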
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
                            unsigned int assoclen)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int processed = 0, to_process;
        unsigned long irq_flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* initialize the counter */
        *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;

        if (nbytes == 0) {
                if (assoclen == 0)
                        rc = gcm_empty(req, rctx->iv, enc);
                else
                        rc = gmac(req, rctx->iv, assoclen);
                if (rc)
                        goto out;
                else
                        goto mac;
        }

        /* Process associated data */
        csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
        if (assoclen) {
                rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
                            assoclen);
                if (rc)
                        goto out;
        }

        /* Set flags for encryption */
        NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        if (enc) {
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
                nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
        }

        do {
                to_process = nbytes - processed;

                csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
                rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
                                       req->src, &to_process,
                                       processed + req->assoclen,
                                       csbcpb->cpb.aes_gcm.iv_or_cnt);
                if (rc)
                        goto out;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
                       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_gcm.in_s0,
                       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

mac:
        if (enc) {
                /* copy out the auth tag */
                scatterwalk_map_and_copy(
                        csbcpb->cpb.aes_gcm.out_pat_or_mac,
                        req->dst, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_TO_SG);
        } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

                scatterwalk_map_and_copy(
                        itag, req->src, req->assoclen + nbytes,
                        crypto_aead_authsize(crypto_aead_reqtfm(req)),
                        SCATTERWALK_FROM_SG);
                rc = crypto_memneq(itag, otag,
                                   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
                     -EBADMSG : 0;
        }
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

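/* The gcm(aes) entry points use the caller's 12-byte IV directly. */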
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, GCM_AES_IV_SIZE);

        return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;

        memcpy(iv, req->iv, GCM_AES_IV_SIZE);

        return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

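/*
 * The rfc4106 entry points build the 12-byte IV from the 4-byte salt saved
 * at setkey time followed by the 8-byte per-request IV, and strip the 8
 * trailing IV bytes that the rfc4106 API counts in req->assoclen before
 * handing off to the common routine.
 */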
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        if (req->assoclen < 8)
                return -EINVAL;

        return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx =
                crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;

        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
        memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

        if (req->assoclen < 8)
                return -EINVAL;

        return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

struct aead_alg nx_gcm_aes_alg = {
        .base = {
                .cra_name        = "gcm(aes)",
                .cra_driver_name = "gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = GCM_AES_IV_SIZE,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm_aes_nx_set_key,
        .encrypt     = gcm_aes_nx_encrypt,
        .decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
        .base = {
                .cra_name        = "rfc4106(gcm(aes))",
                .cra_driver_name = "rfc4106-gcm-aes-nx",
                .cra_priority    = 300,
                .cra_blocksize   = 1,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_module      = THIS_MODULE,
        },
        .init        = nx_crypto_ctx_aes_gcm_init,
        .exit        = nx_crypto_ctx_aead_exit,
        .ivsize      = GCM_RFC4106_IV_SIZE,
        .maxauthsize = AES_BLOCK_SIZE,
        .setkey      = gcm4106_aes_nx_set_key,
        .setauthsize = gcm4106_aes_nx_setauthsize,
        .encrypt     = gcm4106_aes_nx_encrypt,
        .decrypt     = gcm4106_aes_nx_decrypt,
};
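
/*
 * Example usage (illustrative sketch only, not part of this driver): kernel
 * callers reach these algorithms through the generic AEAD API rather than by
 * calling the functions above directly. A minimal outline, assuming a
 * 16-byte key, a 12-byte IV and src/dst already mapped in scatterlists, with
 * error handling and completion waiting omitted for brevity:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, NULL, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * The crypto core prefers "gcm-aes-nx" over the generic software
 * implementation when this driver is registered, because of its higher
 * cra_priority.
 */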