sun8i-ce-cipher.c

// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <[email protected]>
 *
 * This file adds support for the AES cipher with 128, 192 and 256-bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
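
/*
 * Decide whether a request must go to the software fallback: the CE cannot
 * handle requests with too many SG entries, lengths that are not a multiple
 * of the block size, or SG buffers that are not 4-byte aligned.
 */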
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	unsigned int todo, len;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		algt->stat_fb_maxsg++;
		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		algt->stat_fb_leniv++;
		return true;
	}

	if (areq->cryptlen == 0) {
		algt->stat_fb_len0++;
		return true;
	}

	if (areq->cryptlen % 16) {
		algt->stat_fb_mod16++;
		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_dstali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_dstlen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
}
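
/*
 * Forward the request to the software fallback skcipher, preserving the
 * flags, completion callback and direction of the original request.
 */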
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}
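
/*
 * Build the CE task descriptor for the request: select the algorithm and
 * operation direction, DMA-map the key, IV, source and destination
 * scatterlists, and fill the per-flow task with their addresses.
 */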
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
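
	/*
	 * Reset the task descriptor of the chosen flow and set the common
	 * control word: algorithm, operation direction and completion interrupt.
	 */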
	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}
	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;
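
	/* The CE reads the key and IV through DMA, so map them for the device. */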
	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
		rctx->ivlen = ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}
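
	/*
	 * Map the source and destination scatterlists; an in-place request
	 * (src == dst) uses a single bidirectional mapping.
	 */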
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}
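
	/* Fill the task SG descriptors; the CE expects lengths in 32-bit words. */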
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}
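
	/* The completion timeout for this flow scales with the request length. */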
	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}
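
/*
 * Run the prepared task on the selected flow, then complete the request
 * back to the crypto engine with the hardware result.
 */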
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
	return 0;
}
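
/*
 * Undo the DMA mappings set up in sun8i_ce_cipher_prepare() and copy the
 * last ciphertext block back to areq->iv as the chaining IV (from the
 * backed-up source tail on decryption, from the destination on encryption).
 */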
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}
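
/*
 * skcipher entry points: use the software fallback when the hardware cannot
 * handle the request, otherwise pick a flow and queue the request on the
 * corresponding crypto engine.
 */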
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
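
/*
 * Per-tfm initialization: allocate the software fallback, size the request
 * context to hold the fallback request, register the crypto engine callbacks
 * and take a runtime PM reference on the device.
 */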
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	memcpy(algt->fbname,
	       crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
	       CRYPTO_MAX_ALG_NAME);

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;

error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}
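
/* Per-tfm teardown: wipe the key, free the fallback and drop the PM reference. */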
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}
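
/*
 * Validate the AES key length, keep a copy of the key in DMA-able memory
 * and propagate the key to the fallback tfm so both paths stay in sync.
 */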
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
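
/*
 * Same as the AES path, but the 3DES key is validated by the generic DES
 * helper before being stored and forwarded to the fallback tfm.
 */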
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}