// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

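/*
 * RSA private key Job Completion handler: unmap the PDB resources matching
 * the private key form used for this request, then release the descriptor.
 */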
static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zeros that need to be
 * stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

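/*
 * Allocate the extended descriptor for a request: strip or pad the input so
 * it matches the key modulus size, DMA-map the source and destination
 * scatterlists and, when more than one segment (or zero padding) is involved,
 * build and map the SEC4 hardware S/G table used by the Job Descriptor.
 */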
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

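/*
 * crypto-engine .do_one_request callback: runs only for requests that were
 * backlogged and transferred to the engine, so mark the descriptor as such
 * before handing it to the Job Ring.
 */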
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

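/*
 * The set_rsa_*_pdb() helpers fill in the Protocol Data Block for the
 * selected RSA operation: they DMA-map the key components and point the
 * input/output fields either at a single mapped segment or at the hardware
 * S/G table built in rsa_edesc_alloc().
 */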
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

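/*
 * Common submission path for all RSA operations: requests that may backlog
 * go through crypto-engine, everything else is enqueued directly on the Job
 * Ring; on submission failure the PDB and I/O mappings are torn down here.
 */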
static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

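/*
 * Private key components and scratch buffers are released with
 * kfree_sensitive() so they are zeroized before being freed; the public
 * values e and n are not secret and use plain kfree().
 */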
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length, as
 * the BER-encoding requires that the minimum number of bytes be used to encode
 * the integer. The decoded dP, dQ, qInv values have to be zero-padded to the
 * appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

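/*
 * Select the private key form based on which components could be set up:
 * FORM2 needs p, q and the tmp1/tmp2 scratch buffers, FORM3 additionally
 * needs dp, dq and qinv. On allocation failure the partially built
 * components are freed again.
 */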
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this
		 * is the case, the number is non-zero, but this bit is set to
		 * indicate that no encryption or decryption is supported. Only
		 * signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}