// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <[email protected]>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * key sizes in ECB mode.
 */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
		ce->fallback_mod16++;
		return true;
	}

	/*
	 * Check if we have enough descriptors for TX.
	 * Note: TX needs one control desc for each SG.
	 */
	if (sg_nents(areq->src) > MAXDESC / 2) {
		ce->fallback_sg_count_tx++;
		return true;
	}
	/* check if we have enough descriptors for RX */
	if (sg_nents(areq->dst) > MAXDESC) {
		ce->fallback_sg_count_rx++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}

	/* need the same number of SGs (with the same lengths) for source and destination */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length) {
			ce->fallback_not_same_len++;
			return true;
		}
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;

	return false;
}

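/*
 * sl3516_ce_cipher_fallback - handle one request entirely in software.
 * Used whenever sl3516_ce_need_fallback() reports that the CE cannot
 * process the request; the call is forwarded to the fallback skcipher
 * allocated in sl3516_ce_cipher_init().
 */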
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

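/*
 * sl3516_ce_cipher - run one cipher request on the CE.
 * DMA-maps the source/destination scatterlists, fills the rctx->t_src
 * and rctx->t_dst descriptor tables, builds the control packet for the
 * selected mode and hands everything to sl3516_ce_run_task().
 */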
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len;
	struct pkt_control_ecb *ecb;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	algt->stat_req++;

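	/*
	 * Map the scatterlists for DMA: a single bidirectional mapping for
	 * in-place requests, separate TO_DEVICE/FROM_DEVICE mappings
	 * otherwise. The CE cannot use more than MAXDESC / 2 source entries
	 * or MAXDESC destination entries.
	 */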
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

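	/* Fill the source descriptor table from the mapped source SGs. */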
	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
		err = -EINVAL;
		goto theend_sgs;
	}

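	/* Same for the destination descriptor table. */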
	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

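	/*
	 * Build the control packet for the selected mode: operation
	 * direction, algorithm, data length, the big-endian key words and
	 * the TQ flags describing the control packet contents.
	 */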
	switch (algt->mode) {
	case ECB_AES:
		rctx->pctrllen = sizeof(struct pkt_control_ecb);
		ecb = (struct pkt_control_ecb *)ce->pctrl;

		rctx->tqflag = TQ0_TYPE_CTRL;
		rctx->tqflag |= TQ1_CIPHER;
		ecb->control.op_mode = rctx->op_dir;
		ecb->control.cipher_algorithm = ECB_AES;
		ecb->cipher.header_len = 0;
		ecb->cipher.algorithm_len = areq->cryptlen;
		cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
		rctx->h = &ecb->cipher;

		rctx->tqflag |= TQ4_KEY0;
		rctx->tqflag |= TQ5_KEY4;
		rctx->tqflag |= TQ6_KEY6;
		ecb->control.aesnk = op->keylen / 4;
		break;
	}

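	/* Hand the prepared request to the CE via sl3516_ce_run_task(). */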
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend:
	return err;
}

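/*
 * sl3516_ce_handle_cipher_request - crypto_engine do_one_request() callback.
 * Runs the request on the CE and reports the result back to the engine.
 */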
static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sl3516_ce_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

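/*
 * sl3516_ce_skdecrypt - skcipher decrypt entry point. Requests the CE
 * cannot handle are diverted to the software fallback, the rest are
 * queued on the crypto engine.
 */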
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_DECRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

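/* sl3516_ce_skencrypt - skcipher encrypt entry point, mirrors sl3516_ce_skdecrypt() */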
int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_ENCRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

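/*
 * sl3516_ce_cipher_init - per-tfm setup: allocate the software fallback,
 * size the request context to also hold the fallback request, register
 * the crypto_engine callbacks and take a runtime PM reference.
 */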
int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sl3516_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

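/*
 * sl3516_ce_cipher_exit - per-tfm teardown: wipe the key, free the
 * fallback tfm and drop the runtime PM reference.
 */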
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

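/*
 * sl3516_ce_aes_setkey - validate the AES key length, keep a DMA-able
 * copy of the key for the CE and propagate the key to the fallback tfm.
 */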
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}