hpre_crypto.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2019 HiSilicon Limited. */
  3. #include <crypto/akcipher.h>
  4. #include <crypto/curve25519.h>
  5. #include <crypto/dh.h>
  6. #include <crypto/ecc_curve.h>
  7. #include <crypto/ecdh.h>
  8. #include <crypto/rng.h>
  9. #include <crypto/internal/akcipher.h>
  10. #include <crypto/internal/kpp.h>
  11. #include <crypto/internal/rsa.h>
  12. #include <crypto/kpp.h>
  13. #include <crypto/scatterwalk.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/fips.h>
  16. #include <linux/module.h>
  17. #include <linux/time.h>
  18. #include "hpre.h"
  19. struct hpre_ctx;
  20. #define HPRE_CRYPTO_ALG_PRI 1000
  21. #define HPRE_ALIGN_SZ 64
  22. #define HPRE_BITS_2_BYTES_SHIFT 3
  23. #define HPRE_RSA_512BITS_KSZ 64
  24. #define HPRE_RSA_1536BITS_KSZ 192
  25. #define HPRE_CRT_PRMS 5
  26. #define HPRE_CRT_Q 2
  27. #define HPRE_CRT_P 3
  28. #define HPRE_CRT_INV 4
  29. #define HPRE_DH_G_FLAG 0x02
  30. #define HPRE_TRY_SEND_TIMES 100
  31. #define HPRE_INVLD_REQ_ID (-1)
  32. #define HPRE_SQE_ALG_BITS 5
  33. #define HPRE_SQE_DONE_SHIFT 30
  34. #define HPRE_DH_MAX_P_SZ 512
  35. #define HPRE_DFX_SEC_TO_US 1000000
  36. #define HPRE_DFX_US_TO_NS 1000
  37. /* due to nist p521 */
  38. #define HPRE_ECC_MAX_KSZ 66
  39. /* size in bytes of the n prime */
  40. #define HPRE_ECC_NIST_P192_N_SIZE 24
  41. #define HPRE_ECC_NIST_P256_N_SIZE 32
  42. #define HPRE_ECC_NIST_P384_N_SIZE 48
  43. /* size in bytes */
  44. #define HPRE_ECC_HW256_KSZ_B 32
  45. #define HPRE_ECC_HW384_KSZ_B 48
  46. /* capability register mask of driver */
  47. #define HPRE_DRV_RSA_MASK_CAP BIT(0)
  48. #define HPRE_DRV_DH_MASK_CAP BIT(1)
  49. #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
  50. #define HPRE_DRV_X25519_MASK_CAP BIT(5)
  51. typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
  52. struct hpre_rsa_ctx {
  53. /* low address: e--->n */
  54. char *pubkey;
  55. dma_addr_t dma_pubkey;
  56. /* low address: d--->n */
  57. char *prikey;
  58. dma_addr_t dma_prikey;
  59. /* low address: dq->dp->q->p->qinv */
  60. char *crt_prikey;
  61. dma_addr_t dma_crt_prikey;
  62. struct crypto_akcipher *soft_tfm;
  63. };
  64. struct hpre_dh_ctx {
  65. /*
  66. * If base is g we compute the public key
  67. * ya = g^xa mod p; [RFC2631 sec 2.1.1]
  68. * else if base is the counterpart public key we
  69. * compute the shared secret
  70. * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
  71. * low address: d--->n, please refer to the HiSilicon HPRE UM
  72. */
  73. char *xa_p;
  74. dma_addr_t dma_xa_p;
  75. char *g; /* m */
  76. dma_addr_t dma_g;
  77. };
  78. struct hpre_ecdh_ctx {
  79. /* low address: p->a->k->b */
  80. unsigned char *p;
  81. dma_addr_t dma_p;
  82. /* low address: x->y */
  83. unsigned char *g;
  84. dma_addr_t dma_g;
  85. };
  86. struct hpre_curve25519_ctx {
  87. /* low address: p->a->k */
  88. unsigned char *p;
  89. dma_addr_t dma_p;
  90. /* gx coordinate */
  91. unsigned char *g;
  92. dma_addr_t dma_g;
  93. };
  94. struct hpre_ctx {
  95. struct hisi_qp *qp;
  96. struct device *dev;
  97. struct hpre_asym_request **req_list;
  98. struct hpre *hpre;
  99. spinlock_t req_lock;
  100. unsigned int key_sz;
  101. bool crt_g2_mode;
  102. struct idr req_idr;
  103. union {
  104. struct hpre_rsa_ctx rsa;
  105. struct hpre_dh_ctx dh;
  106. struct hpre_ecdh_ctx ecdh;
  107. struct hpre_curve25519_ctx curve25519;
  108. };
  109. /* for ecc algorithms */
  110. unsigned int curve_id;
  111. };
  112. struct hpre_asym_request {
  113. char *src;
  114. char *dst;
  115. struct hpre_sqe req;
  116. struct hpre_ctx *ctx;
  117. union {
  118. struct akcipher_request *rsa;
  119. struct kpp_request *dh;
  120. struct kpp_request *ecdh;
  121. struct kpp_request *curve25519;
  122. } areq;
  123. int err;
  124. int req_id;
  125. hpre_cb cb;
  126. struct timespec64 req_time;
  127. };
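/*
 * Request bookkeeping: each in-flight request gets an id from the per-ctx
 * IDR (bounded by the SQ depth) under req_lock, and that id doubles as the
 * SQE tag so the completion path can look the request up again.
 */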
  128. static int hpre_alloc_req_id(struct hpre_ctx *ctx)
  129. {
  130. unsigned long flags;
  131. int id;
  132. spin_lock_irqsave(&ctx->req_lock, flags);
  133. id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
  134. spin_unlock_irqrestore(&ctx->req_lock, flags);
  135. return id;
  136. }
  137. static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
  138. {
  139. unsigned long flags;
  140. spin_lock_irqsave(&ctx->req_lock, flags);
  141. idr_remove(&ctx->req_idr, req_id);
  142. spin_unlock_irqrestore(&ctx->req_lock, flags);
  143. }
  144. static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
  145. {
  146. struct hpre_ctx *ctx;
  147. struct hpre_dfx *dfx;
  148. int id;
  149. ctx = hpre_req->ctx;
  150. id = hpre_alloc_req_id(ctx);
  151. if (unlikely(id < 0))
  152. return -EINVAL;
  153. ctx->req_list[id] = hpre_req;
  154. hpre_req->req_id = id;
  155. dfx = ctx->hpre->debug.dfx;
  156. if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
  157. ktime_get_ts64(&hpre_req->req_time);
  158. return id;
  159. }
  160. static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
  161. {
  162. struct hpre_ctx *ctx = hpre_req->ctx;
  163. int id = hpre_req->req_id;
  164. if (hpre_req->req_id >= 0) {
  165. hpre_req->req_id = HPRE_INVLD_REQ_ID;
  166. ctx->req_list[id] = NULL;
  167. hpre_free_req_id(ctx, id);
  168. }
  169. }
  170. static struct hisi_qp *hpre_get_qp_and_start(u8 type)
  171. {
  172. struct hisi_qp *qp;
  173. int ret;
  174. qp = hpre_create_qp(type);
  175. if (!qp) {
  176. pr_err("Can not create hpre qp!\n");
  177. return ERR_PTR(-ENODEV);
  178. }
  179. ret = hisi_qm_start_qp(qp, 0);
  180. if (ret < 0) {
  181. hisi_qm_free_qps(&qp, 1);
  182. pci_err(qp->qm->pdev, "Cannot start qp!\n");
  183. return ERR_PTR(-EINVAL);
  184. }
  185. return qp;
  186. }
  187. static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
  188. struct scatterlist *data, unsigned int len,
  189. int is_src, dma_addr_t *tmp)
  190. {
  191. struct device *dev = hpre_req->ctx->dev;
  192. enum dma_data_direction dma_dir;
  193. if (is_src) {
  194. hpre_req->src = NULL;
  195. dma_dir = DMA_TO_DEVICE;
  196. } else {
  197. hpre_req->dst = NULL;
  198. dma_dir = DMA_FROM_DEVICE;
  199. }
  200. *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
  201. if (unlikely(dma_mapping_error(dev, *tmp))) {
  202. dev_err(dev, "dma map data err!\n");
  203. return -ENOMEM;
  204. }
  205. return 0;
  206. }
  207. static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
  208. struct scatterlist *data, unsigned int len,
  209. int is_src, dma_addr_t *tmp)
  210. {
  211. struct hpre_ctx *ctx = hpre_req->ctx;
  212. struct device *dev = ctx->dev;
  213. void *ptr;
  214. int shift;
  215. shift = ctx->key_sz - len;
  216. if (unlikely(shift < 0))
  217. return -EINVAL;
  218. ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
  219. if (unlikely(!ptr))
  220. return -ENOMEM;
  221. if (is_src) {
  222. scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
  223. hpre_req->src = ptr;
  224. } else {
  225. hpre_req->dst = ptr;
  226. }
  227. return 0;
  228. }
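/*
 * Two ways of handing data to the hardware: map the caller's buffer
 * directly (hpre_get_data_dma_addr), or copy it right-aligned into a
 * zero-padded coherent bounce buffer of key_sz bytes (hpre_prepare_dma_buf).
 * hpre_hw_data_init below picks between them.
 */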
  229. static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
  230. struct scatterlist *data, unsigned int len,
  231. int is_src, int is_dh)
  232. {
  233. struct hpre_sqe *msg = &hpre_req->req;
  234. struct hpre_ctx *ctx = hpre_req->ctx;
  235. dma_addr_t tmp = 0;
  236. int ret;
  237. /* DH source data must always be reformatted; other data can be mapped directly if it is a single SG entry of key_sz bytes */
  238. if ((sg_is_last(data) && len == ctx->key_sz) &&
  239. ((is_dh && !is_src) || !is_dh))
  240. ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
  241. else
  242. ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
  243. if (unlikely(ret))
  244. return ret;
  245. if (is_src)
  246. msg->in = cpu_to_le64(tmp);
  247. else
  248. msg->out = cpu_to_le64(tmp);
  249. return 0;
  250. }
  251. static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
  252. struct hpre_asym_request *req,
  253. struct scatterlist *dst,
  254. struct scatterlist *src)
  255. {
  256. struct device *dev = ctx->dev;
  257. struct hpre_sqe *sqe = &req->req;
  258. dma_addr_t tmp;
  259. tmp = le64_to_cpu(sqe->in);
  260. if (unlikely(dma_mapping_error(dev, tmp)))
  261. return;
  262. if (src) {
  263. if (req->src)
  264. dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
  265. else
  266. dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
  267. }
  268. tmp = le64_to_cpu(sqe->out);
  269. if (unlikely(dma_mapping_error(dev, tmp)))
  270. return;
  271. if (req->dst) {
  272. if (dst)
  273. scatterwalk_map_and_copy(req->dst, dst, 0,
  274. ctx->key_sz, 1);
  275. dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
  276. } else {
  277. dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
  278. }
  279. }
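/*
 * Parse a completed SQE: the tag field recovers the originating request,
 * and the done/error bits packed in dw0 decide whether the hardware
 * finished the task successfully.
 */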
  280. static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
  281. void **kreq)
  282. {
  283. struct hpre_asym_request *req;
  284. unsigned int err, done, alg;
  285. int id;
  286. #define HPRE_NO_HW_ERR 0
  287. #define HPRE_HW_TASK_DONE 3
  288. #define HREE_HW_ERR_MASK GENMASK(10, 0)
  289. #define HREE_SQE_DONE_MASK GENMASK(1, 0)
  290. #define HREE_ALG_TYPE_MASK GENMASK(4, 0)
  291. id = (int)le16_to_cpu(sqe->tag);
  292. req = ctx->req_list[id];
  293. hpre_rm_req_from_ctx(req);
  294. *kreq = req;
  295. err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
  296. HREE_HW_ERR_MASK;
  297. done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
  298. HREE_SQE_DONE_MASK;
  299. if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
  300. return 0;
  301. alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
  302. dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
  303. alg, done, err);
  304. return -EINVAL;
  305. }
  306. static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
  307. {
  308. struct hpre *hpre;
  309. if (!ctx || !qp || qlen < 0)
  310. return -EINVAL;
  311. spin_lock_init(&ctx->req_lock);
  312. ctx->qp = qp;
  313. ctx->dev = &qp->qm->pdev->dev;
  314. hpre = container_of(ctx->qp->qm, struct hpre, qm);
  315. ctx->hpre = hpre;
  316. ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
  317. if (!ctx->req_list)
  318. return -ENOMEM;
  319. ctx->key_sz = 0;
  320. ctx->crt_g2_mode = false;
  321. idr_init(&ctx->req_idr);
  322. return 0;
  323. }
  324. static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
  325. {
  326. if (is_clear_all) {
  327. idr_destroy(&ctx->req_idr);
  328. kfree(ctx->req_list);
  329. hisi_qm_free_qps(&ctx->qp, 1);
  330. }
  331. ctx->crt_g2_mode = false;
  332. ctx->key_sz = 0;
  333. }
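/*
 * DFX latency accounting: when the overtime threshold is non-zero, each
 * request's turnaround time is measured in microseconds, and requests that
 * exceed the threshold bump HPRE_OVER_THRHLD_CNT.
 */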
  334. static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
  335. u64 overtime_thrhld)
  336. {
  337. struct timespec64 reply_time;
  338. u64 time_use_us;
  339. ktime_get_ts64(&reply_time);
  340. time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
  341. HPRE_DFX_SEC_TO_US +
  342. (reply_time.tv_nsec - req->req_time.tv_nsec) /
  343. HPRE_DFX_US_TO_NS;
  344. if (time_use_us <= overtime_thrhld)
  345. return false;
  346. return true;
  347. }
  348. static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
  349. {
  350. struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
  351. struct hpre_asym_request *req;
  352. struct kpp_request *areq;
  353. u64 overtime_thrhld;
  354. int ret;
  355. ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
  356. areq = req->areq.dh;
  357. areq->dst_len = ctx->key_sz;
  358. overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
  359. if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
  360. atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
  361. hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
  362. kpp_request_complete(areq, ret);
  363. atomic64_inc(&dfx[HPRE_RECV_CNT].value);
  364. }
  365. static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
  366. {
  367. struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
  368. struct hpre_asym_request *req;
  369. struct akcipher_request *areq;
  370. u64 overtime_thrhld;
  371. int ret;
  372. ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
  373. overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
  374. if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
  375. atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
  376. areq = req->areq.rsa;
  377. areq->dst_len = ctx->key_sz;
  378. hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
  379. akcipher_request_complete(areq, ret);
  380. atomic64_inc(&dfx[HPRE_RECV_CNT].value);
  381. }
  382. static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
  383. {
  384. struct hpre_ctx *ctx = qp->qp_ctx;
  385. struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
  386. struct hpre_sqe *sqe = resp;
  387. struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
  388. if (unlikely(!req)) {
  389. atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
  390. return;
  391. }
  392. req->cb(ctx, resp);
  393. }
  394. static void hpre_stop_qp_and_put(struct hisi_qp *qp)
  395. {
  396. hisi_qm_stop_qp(qp);
  397. hisi_qm_free_qps(&qp, 1);
  398. }
  399. static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
  400. {
  401. struct hisi_qp *qp;
  402. int ret;
  403. qp = hpre_get_qp_and_start(type);
  404. if (IS_ERR(qp))
  405. return PTR_ERR(qp);
  406. qp->qp_ctx = ctx;
  407. qp->req_cb = hpre_alg_cb;
  408. ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
  409. if (ret)
  410. hpre_stop_qp_and_put(qp);
  411. return ret;
  412. }
  413. static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
  414. {
  415. struct hpre_asym_request *h_req;
  416. struct hpre_sqe *msg;
  417. int req_id;
  418. void *tmp;
  419. if (is_rsa) {
  420. struct akcipher_request *akreq = req;
  421. if (akreq->dst_len < ctx->key_sz) {
  422. akreq->dst_len = ctx->key_sz;
  423. return -EOVERFLOW;
  424. }
  425. tmp = akcipher_request_ctx(akreq);
  426. h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  427. h_req->cb = hpre_rsa_cb;
  428. h_req->areq.rsa = akreq;
  429. msg = &h_req->req;
  430. memset(msg, 0, sizeof(*msg));
  431. } else {
  432. struct kpp_request *kreq = req;
  433. if (kreq->dst_len < ctx->key_sz) {
  434. kreq->dst_len = ctx->key_sz;
  435. return -EOVERFLOW;
  436. }
  437. tmp = kpp_request_ctx(kreq);
  438. h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  439. h_req->cb = hpre_dh_cb;
  440. h_req->areq.dh = kreq;
  441. msg = &h_req->req;
  442. memset(msg, 0, sizeof(*msg));
  443. msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
  444. }
  445. msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
  446. msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
  447. msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
  448. msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
  449. h_req->ctx = ctx;
  450. req_id = hpre_add_req_to_ctx(h_req);
  451. if (req_id < 0)
  452. return -EBUSY;
  453. msg->tag = cpu_to_le16((u16)req_id);
  454. return 0;
  455. }
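/*
 * Push an SQE to the queue pair, retrying up to HPRE_TRY_SEND_TIMES while
 * the queue reports -EBUSY; send, busy and failure events are counted in
 * the DFX statistics.
 */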
  456. static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
  457. {
  458. struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
  459. int ctr = 0;
  460. int ret;
  461. do {
  462. atomic64_inc(&dfx[HPRE_SEND_CNT].value);
  463. ret = hisi_qp_send(ctx->qp, msg);
  464. if (ret != -EBUSY)
  465. break;
  466. atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
  467. } while (ctr++ < HPRE_TRY_SEND_TIMES);
  468. if (likely(!ret))
  469. return ret;
  470. if (ret != -EBUSY)
  471. atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
  472. return ret;
  473. }
  474. static int hpre_dh_compute_value(struct kpp_request *req)
  475. {
  476. struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
  477. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  478. void *tmp = kpp_request_ctx(req);
  479. struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  480. struct hpre_sqe *msg = &hpre_req->req;
  481. int ret;
  482. ret = hpre_msg_request_set(ctx, req, false);
  483. if (unlikely(ret))
  484. return ret;
  485. if (req->src) {
  486. ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
  487. if (unlikely(ret))
  488. goto clear_all;
  489. } else {
  490. msg->in = cpu_to_le64(ctx->dh.dma_g);
  491. }
  492. ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
  493. if (unlikely(ret))
  494. goto clear_all;
  495. if (ctx->crt_g2_mode && !req->src)
  496. msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
  497. else
  498. msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
  499. /* success */
  500. ret = hpre_send(ctx, msg);
  501. if (likely(!ret))
  502. return -EINPROGRESS;
  503. clear_all:
  504. hpre_rm_req_from_ctx(hpre_req);
  505. hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
  506. return ret;
  507. }
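/*
 * Only key lengths matching the well-known MODP groups (768 to 4096 bits)
 * are accepted.
 */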
  508. static int hpre_is_dh_params_length_valid(unsigned int key_sz)
  509. {
  510. #define _HPRE_DH_GRP1 768
  511. #define _HPRE_DH_GRP2 1024
  512. #define _HPRE_DH_GRP5 1536
  513. #define _HPRE_DH_GRP14 2048
  514. #define _HPRE_DH_GRP15 3072
  515. #define _HPRE_DH_GRP16 4096
  516. switch (key_sz) {
  517. case _HPRE_DH_GRP1:
  518. case _HPRE_DH_GRP2:
  519. case _HPRE_DH_GRP5:
  520. case _HPRE_DH_GRP14:
  521. case _HPRE_DH_GRP15:
  522. case _HPRE_DH_GRP16:
  523. return 0;
  524. default:
  525. return -EINVAL;
  526. }
  527. }
  528. static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
  529. {
  530. struct device *dev = ctx->dev;
  531. unsigned int sz;
  532. if (params->p_size > HPRE_DH_MAX_P_SZ)
  533. return -EINVAL;
  534. if (hpre_is_dh_params_length_valid(params->p_size <<
  535. HPRE_BITS_2_BYTES_SHIFT))
  536. return -EINVAL;
  537. sz = ctx->key_sz = params->p_size;
  538. ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
  539. &ctx->dh.dma_xa_p, GFP_KERNEL);
  540. if (!ctx->dh.xa_p)
  541. return -ENOMEM;
  542. memcpy(ctx->dh.xa_p + sz, params->p, sz);
  543. /* If g equals 2 don't copy it */
  544. if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
  545. ctx->crt_g2_mode = true;
  546. return 0;
  547. }
  548. ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
  549. if (!ctx->dh.g) {
  550. dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
  551. ctx->dh.dma_xa_p);
  552. ctx->dh.xa_p = NULL;
  553. return -ENOMEM;
  554. }
  555. memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
  556. return 0;
  557. }
  558. static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
  559. {
  560. struct device *dev = ctx->dev;
  561. unsigned int sz = ctx->key_sz;
  562. if (is_clear_all)
  563. hisi_qm_stop_qp(ctx->qp);
  564. if (ctx->dh.g) {
  565. dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
  566. ctx->dh.g = NULL;
  567. }
  568. if (ctx->dh.xa_p) {
  569. memzero_explicit(ctx->dh.xa_p, sz);
  570. dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
  571. ctx->dh.dma_xa_p);
  572. ctx->dh.xa_p = NULL;
  573. }
  574. hpre_ctx_clear(ctx, is_clear_all);
  575. }
  576. static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
  577. unsigned int len)
  578. {
  579. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  580. struct dh params;
  581. int ret;
  582. if (crypto_dh_decode_key(buf, len, &params) < 0)
  583. return -EINVAL;
  584. /* Free old secret if any */
  585. hpre_dh_clear_ctx(ctx, false);
  586. ret = hpre_dh_set_params(ctx, &params);
  587. if (ret < 0)
  588. goto err_clear_ctx;
  589. memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
  590. params.key_size);
  591. return 0;
  592. err_clear_ctx:
  593. hpre_dh_clear_ctx(ctx, false);
  594. return ret;
  595. }
  596. static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
  597. {
  598. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  599. return ctx->key_sz;
  600. }
  601. static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
  602. {
  603. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  604. return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
  605. }
  606. static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
  607. {
  608. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  609. hpre_dh_clear_ctx(ctx, true);
  610. }
  611. static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
  612. {
  613. while (!**ptr && *len) {
  614. (*ptr)++;
  615. (*len)--;
  616. }
  617. }
  618. static bool hpre_rsa_key_size_is_support(unsigned int len)
  619. {
  620. unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
  621. #define _RSA_1024BITS_KEY_WDTH 1024
  622. #define _RSA_2048BITS_KEY_WDTH 2048
  623. #define _RSA_3072BITS_KEY_WDTH 3072
  624. #define _RSA_4096BITS_KEY_WDTH 4096
  625. switch (bits) {
  626. case _RSA_1024BITS_KEY_WDTH:
  627. case _RSA_2048BITS_KEY_WDTH:
  628. case _RSA_3072BITS_KEY_WDTH:
  629. case _RSA_4096BITS_KEY_WDTH:
  630. return true;
  631. default:
  632. return false;
  633. }
  634. }
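/*
 * RSA operations on 512-bit and 1536-bit keys are not handled by the
 * accelerator; those sizes are delegated to the software "rsa-generic" tfm.
 */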
  635. static int hpre_rsa_enc(struct akcipher_request *req)
  636. {
  637. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  638. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  639. void *tmp = akcipher_request_ctx(req);
  640. struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  641. struct hpre_sqe *msg = &hpre_req->req;
  642. int ret;
  643. /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
  644. if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
  645. ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
  646. akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
  647. ret = crypto_akcipher_encrypt(req);
  648. akcipher_request_set_tfm(req, tfm);
  649. return ret;
  650. }
  651. if (unlikely(!ctx->rsa.pubkey))
  652. return -EINVAL;
  653. ret = hpre_msg_request_set(ctx, req, true);
  654. if (unlikely(ret))
  655. return ret;
  656. msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
  657. msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
  658. ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
  659. if (unlikely(ret))
  660. goto clear_all;
  661. ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
  662. if (unlikely(ret))
  663. goto clear_all;
  664. /* success */
  665. ret = hpre_send(ctx, msg);
  666. if (likely(!ret))
  667. return -EINPROGRESS;
  668. clear_all:
  669. hpre_rm_req_from_ctx(hpre_req);
  670. hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
  671. return ret;
  672. }
  673. static int hpre_rsa_dec(struct akcipher_request *req)
  674. {
  675. struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
  676. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  677. void *tmp = akcipher_request_ctx(req);
  678. struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  679. struct hpre_sqe *msg = &hpre_req->req;
  680. int ret;
  681. /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
  682. if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
  683. ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
  684. akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
  685. ret = crypto_akcipher_decrypt(req);
  686. akcipher_request_set_tfm(req, tfm);
  687. return ret;
  688. }
  689. if (unlikely(!ctx->rsa.prikey))
  690. return -EINVAL;
  691. ret = hpre_msg_request_set(ctx, req, true);
  692. if (unlikely(ret))
  693. return ret;
  694. if (ctx->crt_g2_mode) {
  695. msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
  696. msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
  697. HPRE_ALG_NC_CRT);
  698. } else {
  699. msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
  700. msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
  701. HPRE_ALG_NC_NCRT);
  702. }
  703. ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
  704. if (unlikely(ret))
  705. goto clear_all;
  706. ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
  707. if (unlikely(ret))
  708. goto clear_all;
  709. /* success */
  710. ret = hpre_send(ctx, msg);
  711. if (likely(!ret))
  712. return -EINPROGRESS;
  713. clear_all:
  714. hpre_rm_req_from_ctx(hpre_req);
  715. hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
  716. return ret;
  717. }
  718. static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
  719. size_t vlen, bool private)
  720. {
  721. const char *ptr = value;
  722. hpre_rsa_drop_leading_zeros(&ptr, &vlen);
  723. ctx->key_sz = vlen;
  724. /* if an unsupported key size is provided, fall back to the software tfm */
  725. if (!hpre_rsa_key_size_is_support(ctx->key_sz))
  726. return 0;
  727. ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
  728. &ctx->rsa.dma_pubkey,
  729. GFP_KERNEL);
  730. if (!ctx->rsa.pubkey)
  731. return -ENOMEM;
  732. if (private) {
  733. ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
  734. &ctx->rsa.dma_prikey,
  735. GFP_KERNEL);
  736. if (!ctx->rsa.prikey) {
  737. dma_free_coherent(ctx->dev, vlen << 1,
  738. ctx->rsa.pubkey,
  739. ctx->rsa.dma_pubkey);
  740. ctx->rsa.pubkey = NULL;
  741. return -ENOMEM;
  742. }
  743. memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
  744. }
  745. memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
  746. /* Use the HPRE hardware for RSA */
  747. return 1;
  748. }
  749. static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
  750. size_t vlen)
  751. {
  752. const char *ptr = value;
  753. hpre_rsa_drop_leading_zeros(&ptr, &vlen);
  754. if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
  755. return -EINVAL;
  756. memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
  757. return 0;
  758. }
  759. static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
  760. size_t vlen)
  761. {
  762. const char *ptr = value;
  763. hpre_rsa_drop_leading_zeros(&ptr, &vlen);
  764. if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
  765. return -EINVAL;
  766. memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
  767. return 0;
  768. }
  769. static int hpre_crt_para_get(char *para, size_t para_sz,
  770. const char *raw, size_t raw_sz)
  771. {
  772. const char *ptr = raw;
  773. size_t len = raw_sz;
  774. hpre_rsa_drop_leading_zeros(&ptr, &len);
  775. if (!len || len > para_sz)
  776. return -EINVAL;
  777. memcpy(para + para_sz - len, ptr, len);
  778. return 0;
  779. }
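/*
 * Build the CRT private key blob in the layout the hardware expects
 * (low address: dq -> dp -> q -> p -> qinv), each field right-aligned
 * within half of the key size.
 */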
  780. static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
  781. {
  782. unsigned int hlf_ksz = ctx->key_sz >> 1;
  783. struct device *dev = ctx->dev;
  784. u64 offset;
  785. int ret;
  786. ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
  787. &ctx->rsa.dma_crt_prikey,
  788. GFP_KERNEL);
  789. if (!ctx->rsa.crt_prikey)
  790. return -ENOMEM;
  791. ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
  792. rsa_key->dq, rsa_key->dq_sz);
  793. if (ret)
  794. goto free_key;
  795. offset = hlf_ksz;
  796. ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
  797. rsa_key->dp, rsa_key->dp_sz);
  798. if (ret)
  799. goto free_key;
  800. offset = hlf_ksz * HPRE_CRT_Q;
  801. ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
  802. rsa_key->q, rsa_key->q_sz);
  803. if (ret)
  804. goto free_key;
  805. offset = hlf_ksz * HPRE_CRT_P;
  806. ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
  807. rsa_key->p, rsa_key->p_sz);
  808. if (ret)
  809. goto free_key;
  810. offset = hlf_ksz * HPRE_CRT_INV;
  811. ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
  812. rsa_key->qinv, rsa_key->qinv_sz);
  813. if (ret)
  814. goto free_key;
  815. ctx->crt_g2_mode = true;
  816. return 0;
  817. free_key:
  818. offset = hlf_ksz * HPRE_CRT_PRMS;
  819. memzero_explicit(ctx->rsa.crt_prikey, offset);
  820. dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
  821. ctx->rsa.dma_crt_prikey);
  822. ctx->rsa.crt_prikey = NULL;
  823. ctx->crt_g2_mode = false;
  824. return ret;
  825. }
  826. /* If is_clear_all is set, all resources of the QP are released as well. */
  827. static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
  828. {
  829. unsigned int half_key_sz = ctx->key_sz >> 1;
  830. struct device *dev = ctx->dev;
  831. if (is_clear_all)
  832. hisi_qm_stop_qp(ctx->qp);
  833. if (ctx->rsa.pubkey) {
  834. dma_free_coherent(dev, ctx->key_sz << 1,
  835. ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
  836. ctx->rsa.pubkey = NULL;
  837. }
  838. if (ctx->rsa.crt_prikey) {
  839. memzero_explicit(ctx->rsa.crt_prikey,
  840. half_key_sz * HPRE_CRT_PRMS);
  841. dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
  842. ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
  843. ctx->rsa.crt_prikey = NULL;
  844. }
  845. if (ctx->rsa.prikey) {
  846. memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
  847. dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
  848. ctx->rsa.dma_prikey);
  849. ctx->rsa.prikey = NULL;
  850. }
  851. hpre_ctx_clear(ctx, is_clear_all);
  852. }
  853. /*
  854. * Determine whether the key is in CRT form:
  855. * CRT: return true, N-CRT: return false.
  856. */
  857. static bool hpre_is_crt_key(struct rsa_key *key)
  858. {
  859. u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
  860. key->qinv_sz;
  861. #define LEN_OF_NCRT_PARA 5
  862. /* a non-CRT key carries at most 5 bytes of CRT parameters in total */
  863. return len > LEN_OF_NCRT_PARA;
  864. }
  865. static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
  866. unsigned int keylen, bool private)
  867. {
  868. struct rsa_key rsa_key;
  869. int ret;
  870. hpre_rsa_clear_ctx(ctx, false);
  871. if (private)
  872. ret = rsa_parse_priv_key(&rsa_key, key, keylen);
  873. else
  874. ret = rsa_parse_pub_key(&rsa_key, key, keylen);
  875. if (ret < 0)
  876. return ret;
  877. ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
  878. if (ret <= 0)
  879. return ret;
  880. if (private) {
  881. ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
  882. if (ret < 0)
  883. goto free;
  884. if (hpre_is_crt_key(&rsa_key)) {
  885. ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
  886. if (ret < 0)
  887. goto free;
  888. }
  889. }
  890. ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
  891. if (ret < 0)
  892. goto free;
  893. if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
  894. ret = -EINVAL;
  895. goto free;
  896. }
  897. return 0;
  898. free:
  899. hpre_rsa_clear_ctx(ctx, false);
  900. return ret;
  901. }
  902. static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
  903. unsigned int keylen)
  904. {
  905. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  906. int ret;
  907. ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
  908. if (ret)
  909. return ret;
  910. return hpre_rsa_setkey(ctx, key, keylen, false);
  911. }
  912. static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
  913. unsigned int keylen)
  914. {
  915. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  916. int ret;
  917. ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
  918. if (ret)
  919. return ret;
  920. return hpre_rsa_setkey(ctx, key, keylen, true);
  921. }
  922. static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
  923. {
  924. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  926. /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
  926. if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
  927. ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
  928. return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
  929. return ctx->key_sz;
  930. }
  931. static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
  932. {
  933. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  934. int ret;
  935. ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
  936. if (IS_ERR(ctx->rsa.soft_tfm)) {
  937. pr_err("Can not alloc_akcipher!\n");
  938. return PTR_ERR(ctx->rsa.soft_tfm);
  939. }
  940. ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
  941. if (ret)
  942. crypto_free_akcipher(ctx->rsa.soft_tfm);
  943. return ret;
  944. }
  945. static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
  946. {
  947. struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
  948. hpre_rsa_clear_ctx(ctx, true);
  949. crypto_free_akcipher(ctx->rsa.soft_tfm);
  950. }
  951. static void hpre_key_to_big_end(u8 *data, int len)
  952. {
  953. int i, j;
  954. for (i = 0; i < len / 2; i++) {
  955. j = len - i - 1;
  956. swap(data[j], data[i]);
  957. }
  958. }
  959. static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
  960. bool is_ecdh)
  961. {
  962. struct device *dev = ctx->dev;
  963. unsigned int sz = ctx->key_sz;
  964. unsigned int shift = sz << 1;
  965. if (is_clear_all)
  966. hisi_qm_stop_qp(ctx->qp);
  967. if (is_ecdh && ctx->ecdh.p) {
  968. /* ecdh: p->a->k->b */
  969. memzero_explicit(ctx->ecdh.p + shift, sz);
  970. dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
  971. ctx->ecdh.p = NULL;
  972. } else if (!is_ecdh && ctx->curve25519.p) {
  973. /* curve25519: p->a->k */
  974. memzero_explicit(ctx->curve25519.p + shift, sz);
  975. dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
  976. ctx->curve25519.dma_p);
  977. ctx->curve25519.p = NULL;
  978. }
  979. hpre_ctx_clear(ctx, is_clear_all);
  980. }
  981. /*
  982. * HPRE supports curve widths of 192/224/256/384/521 bits and rounds
  983. * them up as follows:
  984. * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
  985. * If the parameter bit width is smaller, the high-order bytes are
  986. * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
  987. */
  988. static unsigned int hpre_ecdh_supported_curve(unsigned short id)
  989. {
  990. switch (id) {
  991. case ECC_CURVE_NIST_P192:
  992. case ECC_CURVE_NIST_P256:
  993. return HPRE_ECC_HW256_KSZ_B;
  994. case ECC_CURVE_NIST_P384:
  995. return HPRE_ECC_HW384_KSZ_B;
  996. default:
  997. break;
  998. }
  999. return 0;
  1000. }
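/*
 * Copy a curve parameter stored as little-endian u64 digits into the DMA
 * buffer and convert it to the big-endian byte order the hardware expects.
 */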
  1001. static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
  1002. {
  1003. unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
  1004. u8 i = 0;
  1005. while (i < ndigits - 1) {
  1006. memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
  1007. i++;
  1008. }
  1009. memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
  1010. hpre_key_to_big_end((u8 *)addr, cur_sz);
  1011. }
  1012. static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
  1013. unsigned int cur_sz)
  1014. {
  1015. unsigned int shifta = ctx->key_sz << 1;
  1016. unsigned int shiftb = ctx->key_sz << 2;
  1017. void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
  1018. void *a = ctx->ecdh.p + shifta - cur_sz;
  1019. void *b = ctx->ecdh.p + shiftb - cur_sz;
  1020. void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
  1021. void *y = ctx->ecdh.g + shifta - cur_sz;
  1022. const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
  1023. char *n;
  1024. if (unlikely(!curve))
  1025. return -EINVAL;
  1026. n = kzalloc(ctx->key_sz, GFP_KERNEL);
  1027. if (!n)
  1028. return -ENOMEM;
  1029. fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
  1030. fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
  1031. fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
  1032. fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
  1033. fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
  1034. fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
  1035. if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
  1036. kfree(n);
  1037. return -EINVAL;
  1038. }
  1039. kfree(n);
  1040. return 0;
  1041. }
  1042. static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
  1043. {
  1044. switch (id) {
  1045. case ECC_CURVE_NIST_P192:
  1046. return HPRE_ECC_NIST_P192_N_SIZE;
  1047. case ECC_CURVE_NIST_P256:
  1048. return HPRE_ECC_NIST_P256_N_SIZE;
  1049. case ECC_CURVE_NIST_P384:
  1050. return HPRE_ECC_NIST_P384_N_SIZE;
  1051. default:
  1052. break;
  1053. }
  1054. return 0;
  1055. }
  1056. static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
  1057. {
  1058. struct device *dev = ctx->dev;
  1059. unsigned int sz, shift, curve_sz;
  1060. int ret;
  1061. ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
  1062. if (!ctx->key_sz)
  1063. return -EINVAL;
  1064. curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
  1065. if (!curve_sz || params->key_size > curve_sz)
  1066. return -EINVAL;
  1067. sz = ctx->key_sz;
  1068. if (!ctx->ecdh.p) {
  1069. ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
  1070. GFP_KERNEL);
  1071. if (!ctx->ecdh.p)
  1072. return -ENOMEM;
  1073. }
  1074. shift = sz << 2;
  1075. ctx->ecdh.g = ctx->ecdh.p + shift;
  1076. ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
  1077. ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
  1078. if (ret) {
  1079. dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
  1080. dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
  1081. ctx->ecdh.p = NULL;
  1082. return ret;
  1083. }
  1084. return 0;
  1085. }
  1086. static bool hpre_key_is_zero(char *key, unsigned short key_sz)
  1087. {
  1088. int i;
  1089. for (i = 0; i < key_sz; i++)
  1090. if (key[i])
  1091. return false;
  1092. return true;
  1093. }
  1094. static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
  1095. {
  1096. struct device *dev = ctx->dev;
  1097. int ret;
  1098. ret = crypto_get_default_rng();
  1099. if (ret) {
  1100. dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
  1101. return ret;
  1102. }
  1103. ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
  1104. params->key_size);
  1105. crypto_put_default_rng();
  1106. if (ret)
  1107. dev_err(dev, "failed to get rng, ret = %d!\n", ret);
  1108. return ret;
  1109. }
  1110. static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
  1111. unsigned int len)
  1112. {
  1113. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1114. struct device *dev = ctx->dev;
  1115. char key[HPRE_ECC_MAX_KSZ];
  1116. unsigned int sz, sz_shift;
  1117. struct ecdh params;
  1118. int ret;
  1119. if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
  1120. dev_err(dev, "failed to decode ecdh key!\n");
  1121. return -EINVAL;
  1122. }
  1123. /* Use stdrng to generate private key */
  1124. if (!params.key || !params.key_size) {
  1125. params.key = key;
  1126. params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
  1127. ret = ecdh_gen_privkey(ctx, &params);
  1128. if (ret)
  1129. return ret;
  1130. }
  1131. if (hpre_key_is_zero(params.key, params.key_size)) {
  1132. dev_err(dev, "Invalid hpre key!\n");
  1133. return -EINVAL;
  1134. }
  1135. hpre_ecc_clear_ctx(ctx, false, true);
  1136. ret = hpre_ecdh_set_param(ctx, &params);
  1137. if (ret < 0) {
  1138. dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
  1139. return ret;
  1140. }
  1141. sz = ctx->key_sz;
  1142. sz_shift = (sz << 1) + sz - params.key_size;
  1143. memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
  1144. return 0;
  1145. }
  1146. static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
  1147. struct hpre_asym_request *req,
  1148. struct scatterlist *dst,
  1149. struct scatterlist *src)
  1150. {
  1151. struct device *dev = ctx->dev;
  1152. struct hpre_sqe *sqe = &req->req;
  1153. dma_addr_t dma;
  1154. dma = le64_to_cpu(sqe->in);
  1155. if (unlikely(dma_mapping_error(dev, dma)))
  1156. return;
  1157. if (src && req->src)
  1158. dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
  1159. dma = le64_to_cpu(sqe->out);
  1160. if (unlikely(dma_mapping_error(dev, dma)))
  1161. return;
  1162. if (req->dst)
  1163. dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
  1164. if (dst)
  1165. dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
  1166. }
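/*
 * ECDH completion: the hardware returns x and y each padded to key_sz, so
 * the callback compacts them into two curve_sz-sized coordinates before
 * completing the kpp request.
 */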
  1167. static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
  1168. {
  1169. unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
  1170. struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
  1171. struct hpre_asym_request *req = NULL;
  1172. struct kpp_request *areq;
  1173. u64 overtime_thrhld;
  1174. char *p;
  1175. int ret;
  1176. ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
  1177. areq = req->areq.ecdh;
  1178. areq->dst_len = ctx->key_sz << 1;
  1179. overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
  1180. if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
  1181. atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
  1182. p = sg_virt(areq->dst);
  1183. memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
  1184. memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
  1185. hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
  1186. kpp_request_complete(areq, ret);
  1187. atomic64_inc(&dfx[HPRE_RECV_CNT].value);
  1188. }
  1189. static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
  1190. struct kpp_request *req)
  1191. {
  1192. struct hpre_asym_request *h_req;
  1193. struct hpre_sqe *msg;
  1194. int req_id;
  1195. void *tmp;
  1196. if (req->dst_len < ctx->key_sz << 1) {
  1197. req->dst_len = ctx->key_sz << 1;
  1198. return -EINVAL;
  1199. }
  1200. tmp = kpp_request_ctx(req);
  1201. h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  1202. h_req->cb = hpre_ecdh_cb;
  1203. h_req->areq.ecdh = req;
  1204. msg = &h_req->req;
  1205. memset(msg, 0, sizeof(*msg));
  1206. msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
  1207. msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
  1208. msg->key = cpu_to_le64(ctx->ecdh.dma_p);
  1209. msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
  1210. msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
  1211. h_req->ctx = ctx;
  1212. req_id = hpre_add_req_to_ctx(h_req);
  1213. if (req_id < 0)
  1214. return -EBUSY;
  1215. msg->tag = cpu_to_le16((u16)req_id);
  1216. return 0;
  1217. }
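/*
 * The peer public key arrives as gx || gy; each coordinate is re-padded to
 * key_sz and placed at the offset the hardware expects before the buffer
 * is set as the SQE input.
 */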
  1218. static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
  1219. struct scatterlist *data, unsigned int len)
  1220. {
  1221. struct hpre_sqe *msg = &hpre_req->req;
  1222. struct hpre_ctx *ctx = hpre_req->ctx;
  1223. struct device *dev = ctx->dev;
  1224. unsigned int tmpshift;
  1225. dma_addr_t dma = 0;
  1226. void *ptr;
  1227. int shift;
  1228. /* Src_data includes gx and gy. */
  1229. shift = ctx->key_sz - (len >> 1);
  1230. if (unlikely(shift < 0))
  1231. return -EINVAL;
  1232. ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
  1233. if (unlikely(!ptr))
  1234. return -ENOMEM;
  1235. tmpshift = ctx->key_sz << 1;
  1236. scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
  1237. memcpy(ptr + shift, ptr + tmpshift, len >> 1);
  1238. memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
  1239. hpre_req->src = ptr;
  1240. msg->in = cpu_to_le64(dma);
  1241. return 0;
  1242. }
  1243. static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
  1244. struct scatterlist *data, unsigned int len)
  1245. {
  1246. struct hpre_sqe *msg = &hpre_req->req;
  1247. struct hpre_ctx *ctx = hpre_req->ctx;
  1248. struct device *dev = ctx->dev;
  1249. dma_addr_t dma;
  1250. if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
  1251. dev_err(dev, "data or data length is illegal!\n");
  1252. return -EINVAL;
  1253. }
  1254. hpre_req->dst = NULL;
  1255. dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
  1256. if (unlikely(dma_mapping_error(dev, dma))) {
  1257. dev_err(dev, "dma map data err!\n");
  1258. return -ENOMEM;
  1259. }
  1260. msg->out = cpu_to_le64(dma);
  1261. return 0;
  1262. }
  1263. static int hpre_ecdh_compute_value(struct kpp_request *req)
  1264. {
  1265. struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
  1266. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1267. struct device *dev = ctx->dev;
  1268. void *tmp = kpp_request_ctx(req);
  1269. struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
  1270. struct hpre_sqe *msg = &hpre_req->req;
  1271. int ret;
  1272. ret = hpre_ecdh_msg_request_set(ctx, req);
  1273. if (unlikely(ret)) {
  1274. dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
  1275. return ret;
  1276. }
  1277. if (req->src) {
  1278. ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
  1279. if (unlikely(ret)) {
  1280. dev_err(dev, "failed to init src data, ret = %d!\n", ret);
  1281. goto clear_all;
  1282. }
  1283. } else {
  1284. msg->in = cpu_to_le64(ctx->ecdh.dma_g);
  1285. }
  1286. ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
  1287. if (unlikely(ret)) {
  1288. dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
  1289. goto clear_all;
  1290. }
  1291. msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
  1292. ret = hpre_send(ctx, msg);
  1293. if (likely(!ret))
  1294. return -EINPROGRESS;
  1295. clear_all:
  1296. hpre_rm_req_from_ctx(hpre_req);
  1297. hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
  1298. return ret;
  1299. }
  1300. static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
  1301. {
  1302. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1303. /* max size is the pub_key size, which includes both x and y */
  1304. return ctx->key_sz << 1;
  1305. }
  1306. static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
  1307. {
  1308. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1309. ctx->curve_id = ECC_CURVE_NIST_P192;
  1310. return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
  1311. }
  1312. static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
  1313. {
  1314. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1315. ctx->curve_id = ECC_CURVE_NIST_P256;
  1316. return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
  1317. }
  1318. static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
  1319. {
  1320. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1321. ctx->curve_id = ECC_CURVE_NIST_P384;
  1322. return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
  1323. }
  1324. static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
  1325. {
  1326. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1327. hpre_ecc_clear_ctx(ctx, true, true);
  1328. }
  1329. static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
  1330. unsigned int len)
  1331. {
  1332. u8 secret[CURVE25519_KEY_SIZE] = { 0 };
  1333. unsigned int sz = ctx->key_sz;
  1334. const struct ecc_curve *curve;
  1335. unsigned int shift = sz << 1;
  1336. void *p;
  1337. /*
  1338. * The key from 'buf' is little-endian; clamp it as described in
  1339. * RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then
  1340. * convert it to big endian. Only then does the result match the
  1341. * software curve25519 implementation in the crypto subsystem.
  1342. */
  1343. memcpy(secret, buf, len);
  1344. curve25519_clamp_secret(secret);
  1345. hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
  1346. p = ctx->curve25519.p + sz - len;
  1347. curve = ecc_get_curve25519();
  1348. /* fill curve parameters */
  1349. fill_curve_param(p, curve->p, len, curve->g.ndigits);
  1350. fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
  1351. memcpy(p + shift, secret, len);
  1352. fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
  1353. memzero_explicit(secret, CURVE25519_KEY_SIZE);
  1354. }
  1355. static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
  1356. unsigned int len)
  1357. {
  1358. struct device *dev = ctx->dev;
  1359. unsigned int sz = ctx->key_sz;
  1360. unsigned int shift = sz << 1;
  1361. /* p->a->k->gx */
  1362. if (!ctx->curve25519.p) {
  1363. ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
  1364. &ctx->curve25519.dma_p,
  1365. GFP_KERNEL);
  1366. if (!ctx->curve25519.p)
  1367. return -ENOMEM;
  1368. }
  1369. ctx->curve25519.g = ctx->curve25519.p + shift + sz;
  1370. ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
  1371. hpre_curve25519_fill_curve(ctx, buf, len);
  1372. return 0;
  1373. }
  1374. static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
  1375. unsigned int len)
  1376. {
  1377. struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
  1378. struct device *dev = ctx->dev;
  1379. int ret = -EINVAL;
  1380. if (len != CURVE25519_KEY_SIZE ||
  1381. !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
  1382. dev_err(dev, "key is null or key len is not 32bytes!\n");
  1383. return ret;
  1384. }
  1385. /* Free old secret if any */
  1386. hpre_ecc_clear_ctx(ctx, false, false);
  1387. ctx->key_sz = CURVE25519_KEY_SIZE;
  1388. ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
  1389. if (ret) {
  1390. dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
  1391. hpre_ecc_clear_ctx(ctx, false, false);
  1392. return ret;
  1393. }
  1394. return 0;
  1395. }
static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

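/*
 * The cleanup above mirrors how the buffers are set up further down in this
 * file: the source is a driver-owned bounce buffer obtained with
 * dma_alloc_coherent() (see hpre_curve25519_src_init()), so it is freed with
 * dma_free_coherent(); the destination is the caller's scatterlist page
 * mapped with dma_map_single() (see hpre_curve25519_dst_init()), so it is
 * only unmapped. The DMA_MAPPING_ERROR sentinels written into the SQE let
 * this helper skip fields that were never initialised.
 */
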
static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

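/*
 * The completion path above byte-reverses the hardware output (the helper is
 * a plain byte-order swap) so the X25519 result reaches the caller in the
 * little-endian form users expect, releases the per-request DMA resources,
 * completes the kpp request with the hardware status, and updates the
 * debugfs counters, including the bd-timeout statistic.
 */
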
static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

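/*
 * Two conventions used above are worth spelling out. First, when the
 * caller's dst_len is too small, the function follows the usual kernel
 * crypto pattern of writing back the required length before returning
 * -EINVAL. Second, msg->in and msg->out are pre-set to DMA_MAPPING_ERROR so
 * that hpre_curve25519_hw_data_clr_all() can tell which of them was actually
 * populated if a later setup step fails.
 */
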
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The reduced result is ptr's last byte minus 0xed (the last byte of p) */
	ptr[i] -= 0xed;
}

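/*
 * Worked example: the caller below masks bit 255 first, so the only values
 * that can still exceed p = 2^255 - 19 lie in [2^255 - 18, 2^255 - 1]. In
 * big-endian form these all share the leading bytes 0x7f 0xff ... 0xff of p
 * and differ only in the last byte, so reducing modulo p amounts to zeroing
 * the leading bytes and subtracting 0xed from the final one. For instance,
 * 2^255 - 1 ends in 0xff, and 0xff - 0xed = 0x12 = 18 = (2^255 - 1) - p.
 */
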
static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "source data len is not 32 bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * The source data (gx) is in little-endian order; mask the MSB of its
	 * final byte as described in RFC 7748, then convert it to big-endian
	 * form so that the hisi_hpre hardware can consume it.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * When src_data lies in the range (2^255 - 19) to (2^255 - 1) it is
	 * not less than p, so reduce it modulo p before use.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}

static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

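/*
 * Illustrative sketch (not part of the driver): a rough example of how an
 * in-kernel user might exercise one of the ECDH algorithms registered above
 * through the generic kpp API, assuming <crypto/kpp.h>, <crypto/ecdh.h> and
 * <linux/scatterlist.h> are available and that 'privkey' and 'pub' point to
 * DMA-able (e.g. kmalloc'd) buffers. Buffer sizes and error handling are
 * simplified and the function name is hypothetical.
 */
#if 0
static int example_ecdh_p256_pubkey(u8 *privkey, unsigned int privkey_len,
				    u8 *pub, unsigned int pub_len)
{
	struct ecdh params = { .key = (char *)privkey, .key_size = privkey_len };
	unsigned int packed_len;
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	DECLARE_CRYPTO_WAIT(wait);
	char *packed;
	int ret;

	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* pack the private key in the format expected by set_secret() */
	packed_len = crypto_ecdh_key_len(&params);
	packed = kmalloc(packed_len, GFP_KERNEL);
	if (!packed) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	ret = crypto_ecdh_encode_key(packed, packed_len, &params);
	if (ret)
		goto free_packed;

	ret = crypto_kpp_set_secret(tfm, packed, packed_len);
	if (ret)
		goto free_packed;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_packed;
	}

	/* 'pub' receives the public key as x || y (64 bytes for P-256) */
	sg_init_one(&dst, pub, pub_len);
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, pub_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				 crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);

	kpp_request_free(req);
free_packed:
	kfree_sensitive(packed);
free_tfm:
	crypto_free_kpp(tfm);
	return ret;
}
#endif
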
static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

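/*
 * Illustrative sketch (not part of the driver): a minimal X25519
 * shared-secret computation against the "curve25519" kpp algorithm
 * registered above, assuming <crypto/kpp.h>, <crypto/curve25519.h> and
 * <linux/scatterlist.h> are available and that all three buffers are
 * DMA-able (e.g. kmalloc'd). The secret is the raw 32-byte scalar
 * (hpre_curve25519_set_secret() handles clamping), the peer public key goes
 * in through the src scatterlist, and the 32-byte result comes back in
 * little-endian order. The function name is hypothetical.
 */
#if 0
static int example_x25519_shared(u8 *secret, u8 *peer_pub, u8 *shared)
{
	struct scatterlist src, dst;
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_kpp("curve25519", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_kpp_set_secret(tfm, secret, CURVE25519_KEY_SIZE);
	if (ret)
		goto free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&src, peer_pub, CURVE25519_KEY_SIZE);
	sg_init_one(&dst, shared, CURVE25519_KEY_SIZE);
	kpp_request_set_input(req, &src, CURVE25519_KEY_SIZE);
	kpp_request_set_output(req, &dst, CURVE25519_KEY_SIZE);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				 crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);

	kpp_request_free(req);
free_tfm:
	crypto_free_kpp(tfm);
	return ret;
}
#endif
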
static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

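/*
 * If registering curve i fails, the unwind loop above walks back from i - 1
 * to 0 and unregisters only the curves that were successfully registered, so
 * a partial failure leaves no stale kpp registrations behind.
 */
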
static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_register_rsa(qm);
	if (ret)
		return ret;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
	return ret;
}

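/*
 * hpre_algs_register() registers in the order RSA -> DH -> ECDH -> X25519
 * and unwinds in reverse on failure; hpre_algs_unregister() below tears the
 * algorithms down in the same reverse order. Each helper re-checks hardware
 * capability via hpre_check_alg_support(), so unsupported algorithms are
 * skipped symmetrically on both paths.
 */
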
void hpre_algs_unregister(struct hisi_qm *qm)
{
	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);
}