caamrng.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
                           CAAM_CMD_SZ + \
                           CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
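
/*
 * Each job pulls exactly CAAM_RNG_MAX_FIFO_STORE_SIZE (16) bytes out of
 * the RNG, matching the FIFO STORE length programmed in caam_init_desc().
 * CAAM_RNG_DESC_LEN adds up the three descriptor commands built there:
 * the job header, the RNG OPERATION, and the FIFO STORE plus its
 * destination pointer.
 */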

/* rng per-device context */
struct caam_rng_ctx {
        struct hwrng rng;
        struct device *jrdev;
        struct device *ctrldev;
        void *desc_async;
        void *desc_sync;
        struct work_struct worker;
        struct kfifo fifo;
};

struct caam_rng_job_ctx {
        struct completion *done;
        int *err;
};
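
/*
 * A caam_rng_job_ctx points back at stack variables owned by the caller
 * of caam_rng_read_one(): the completion it sleeps on and the slot that
 * receives the decoded job status. It only lives for the duration of a
 * single job ring request.
 */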

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
        return (struct caam_rng_ctx *)r->priv;
}

static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
                          void *context)
{
        struct caam_rng_job_ctx *jctx = context;

        if (err)
                *jctx->err = caam_jr_strstatus(jrdev, err);

        complete(jctx->done);
}
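
/*
 * caam_rng_done() is the job ring completion callback: it runs once the
 * hardware has executed the descriptor. A non-zero CAAM status word is
 * translated into an -errno value by caam_jr_strstatus() before the
 * waiter in caam_rng_read_one() is woken.
 */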

static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
        init_job_desc(desc, 0); /* + 1 cmd_sz */
        /* Generate random bytes: + 1 cmd_sz */
        append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
                         OP_ALG_PR_ON);
        /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
        append_fifo_store(desc, dst_dma,
                          CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

        print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
                             16, 4, desc, desc_bytes(desc), 1);

        return desc;
}
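
/*
 * The resulting descriptor is three commands long, as accounted for by
 * CAAM_RNG_DESC_LEN:
 *
 *      HEADER                                  (1 cmd)
 *      OPERATION: RNG, class 1, PR on          (1 cmd)
 *      FIFO STORE: 16 bytes -> dst_dma         (1 cmd + pointer)
 *
 * OP_ALG_PR_ON requests prediction resistance, i.e. the RNG reseeds
 * from its entropy source before generating the requested bytes.
 */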

static int caam_rng_read_one(struct device *jrdev,
                             void *dst, int len,
                             void *desc,
                             struct completion *done)
{
        dma_addr_t dst_dma;
        int err, ret = 0;
        struct caam_rng_job_ctx jctx = {
                .done = done,
                .err  = &ret,
        };

        len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

        dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, dst_dma)) {
                dev_err(jrdev, "unable to map destination memory\n");
                return -ENOMEM;
        }

        init_completion(done);
        err = caam_jr_enqueue(jrdev,
                              caam_init_desc(desc, dst_dma),
                              caam_rng_done, &jctx);
        if (err == -EINPROGRESS) {
                wait_for_completion(done);
                err = 0;
        }

        dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

        return err ?: (ret ?: len);
}
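
/*
 * Return contract: a negative errno if the mapping, the enqueue or the
 * job itself failed, otherwise the number of bytes produced. Because the
 * incoming len is overridden with CAAM_RNG_MAX_FIFO_STORE_SIZE, every
 * successful call yields exactly 16 bytes regardless of the requested
 * length (the hw_random core guarantees buffers of at least 32 bytes,
 * so this cannot overrun dst on the synchronous path).
 */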

static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
        struct scatterlist sg[1];
        struct completion done;
        int len, nents;

        sg_init_table(sg, ARRAY_SIZE(sg));
        nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
                                     CAAM_RNG_MAX_FIFO_STORE_SIZE);
        if (!nents)
                return;

        len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
                                sg[0].length,
                                ctx->desc_async,
                                &done);
        if (len < 0)
                return;

        kfifo_dma_in_finish(&ctx->fifo, len);
}
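
/*
 * The refill path uses the kfifo DMA helpers: kfifo_dma_in_prepare()
 * exposes the fifo's free space as a scatterlist, the job writes the
 * random bytes straight into it, and kfifo_dma_in_finish() commits
 * however many bytes the job reported. On any failure the prepared
 * space is simply never committed.
 */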

static void caam_rng_worker(struct work_struct *work)
{
        struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
                                                worker);
        caam_rng_fill_async(ctx);
}

static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
        int out;

        if (wait) {
                struct completion done;

                return caam_rng_read_one(ctx->jrdev, dst, max,
                                         ctx->desc_sync, &done);
        }

        out = kfifo_out(&ctx->fifo, dst, max);
        if (kfifo_is_empty(&ctx->fifo))
                schedule_work(&ctx->worker);

        return out;
}
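
/*
 * The hw_random ->read hook thus has two paths: with wait set the caller
 * may sleep, so a fresh job is run synchronously using the completion on
 * the caller's stack; without wait it only drains whatever the fifo
 * already holds and kicks the worker so the fifo is refilled in the
 * background for the next caller.
 */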

static void caam_cleanup(struct hwrng *rng)
{
        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

        flush_work(&ctx->worker);
        caam_jr_free(ctx->jrdev);
        kfifo_free(&ctx->fifo);
}
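
/*
 * Teardown order matters here: the worker is flushed first so that no
 * asynchronous refill can still be using the job ring or the fifo when
 * they are released.
 */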

static int caam_init(struct hwrng *rng)
{
        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
        int err;

        ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
                                      GFP_DMA | GFP_KERNEL);
        if (!ctx->desc_sync)
                return -ENOMEM;

        ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
                                       GFP_DMA | GFP_KERNEL);
        if (!ctx->desc_async)
                return -ENOMEM;

        if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
                        GFP_DMA | GFP_KERNEL))
                return -ENOMEM;

        INIT_WORK(&ctx->worker, caam_rng_worker);

        ctx->jrdev = caam_jr_alloc();
        err = PTR_ERR_OR_ZERO(ctx->jrdev);
        if (err) {
                kfifo_free(&ctx->fifo);
                pr_err("Job Ring Device allocation for transform failed\n");
                return err;
        }

        /*
         * Fill async buffer to have early randomness data for
         * hw_random
         */
        caam_rng_fill_async(ctx);

        return 0;
}
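
/*
 * Two descriptor buffers are kept so that the synchronous (wait) path
 * and the asynchronous worker never share descriptor memory while both
 * are in flight. GFP_DMA is presumably used to keep the descriptors and
 * the fifo buffer within the device's DMA addressing limits, since CAAM
 * fetches descriptors and stores the fifo data by DMA.
 */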

/* Forward declaration: the function address doubles as the devres group id */
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
        devres_release_group(ctrldev, caam_rng_init);
}

int caam_rng_init(struct device *ctrldev)
{
        struct caam_rng_ctx *ctx;
        u32 rng_inst;
        struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
        int ret;

        /* Check for an instantiated RNG before registration */
        if (priv->era < 10)
                rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
                            CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
        else
                rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

        if (!rng_inst)
                return 0;

        if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
                return -ENOMEM;

        ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ctrldev = ctrldev;

        ctx->rng.name    = "rng-caam";
        ctx->rng.init    = caam_init;
        ctx->rng.cleanup = caam_cleanup;
        ctx->rng.read    = caam_read;
        ctx->rng.priv    = (unsigned long)ctx;
        ctx->rng.quality = 1024;

        dev_info(ctrldev, "registering rng-caam\n");

        ret = devm_hwrng_register(ctrldev, &ctx->rng);
        if (ret) {
                caam_rng_exit(ctrldev);
                return ret;
        }

        devres_close_group(ctrldev, caam_rng_init);

        return 0;
}
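
/*
 * quality = 1024 declares full entropy to the hw_random core (the scale
 * is bits of entropy per 1024 bits of data). Once registered, the device
 * can be selected and read through the standard hw_random interface,
 * e.g. by writing "rng-caam" to /sys/class/misc/hw_random/rng_current
 * and then reading the character device. A minimal userspace sketch
 * (illustrative, generic hw_random usage rather than anything specific
 * to this driver):
 *
 *      int fd = open("/dev/hwrng", O_RDONLY);
 *      unsigned char buf[16];
 *
 *      if (fd >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf))
 *              // buf now holds 16 bytes of CAAM-generated randomness
 */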