// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <[email protected]>
 * Author: Arnaud Ebalard <[email protected]>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;
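
/*
 * Pop the next request off the engine queue and report the current
 * backlog entry, if any. Callers must hold the engine lock.
 */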
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}
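
/*
 * If the engine is idle, make the next queued request the active one
 * and kick off its first processing step. A request promoted from the
 * backlog is notified with -EINPROGRESS.
 */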
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
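
/*
 * Handle completion in standard (non-TDMA) mode: let the
 * context-specific ->process() handler interpret the interrupt status,
 * then either retire the request or step it again.
 */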
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
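
/* Dispatch to the TDMA path when a descriptor chain is in flight. */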
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
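
/*
 * Clean up a request and run its completion callback. Bottom halves
 * are disabled around the callback because this is reached from the
 * threaded IRQ handler, i.e. process context.
 */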
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
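
/*
 * Threaded interrupt handler: acknowledge pending interrupts, retire
 * the active request once it is done, rearm the engine with the next
 * pending request, then drain the complete queue.
 */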
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
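
/*
 * Enqueue a request on an engine. DMA-backed requests that were
 * accepted (or backlogged) are also appended to the engine's TDMA
 * chain so consecutive requests can be processed back to back.
 */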
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
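
/*
 * Register every skcipher and ahash algorithm advertised by the
 * platform capabilities, unwinding earlier registrations on failure.
 */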
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
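
/*
 * Program the TDMA address decoding windows so the engine can reach
 * each DRAM chip select through the MBus.
 */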
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
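
/*
 * Create the DMA pools (TDMA descriptors, operation contexts, cache
 * and padding buffers) used by the TDMA path. Nothing to do on
 * engines without TDMA support.
 */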
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
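
/*
 * Acquire the per-engine SRAM: prefer a "marvell,crypto-srams"
 * genalloc pool when the device tree provides one, otherwise fall
 * back to a named memory resource that is ioremapped and DMA-mapped.
 */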
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram_pool = gen_pool_dma_alloc(engine->pool,
						       cesa->sram_size,
						       &engine->sram_dma);
		if (engine->sram_pool)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
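
/*
 * Probe the CESA device: resolve the platform capabilities, set up
 * the DMA pools, then bring up each engine (SRAM, clocks, MBus
 * windows, IRQ) before registering the supported algorithms.
 */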
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		engine->irq = irq;

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Set affinity */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		if (cesa->engines[i].irq > 0)
			irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}

	return ret;
}

static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
		irq_set_affinity_hint(cesa->engines[i].irq, NULL);
	}

	return 0;
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Arnaud Ebalard <[email protected]>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");