- // SPDX-License-Identifier: GPL-2.0
- /* Copyright (c) 2019 HiSilicon Limited. */
- #include <crypto/akcipher.h>
- #include <crypto/curve25519.h>
- #include <crypto/dh.h>
- #include <crypto/ecc_curve.h>
- #include <crypto/ecdh.h>
- #include <crypto/rng.h>
- #include <crypto/internal/akcipher.h>
- #include <crypto/internal/kpp.h>
- #include <crypto/internal/rsa.h>
- #include <crypto/kpp.h>
- #include <crypto/scatterwalk.h>
- #include <linux/dma-mapping.h>
- #include <linux/fips.h>
- #include <linux/module.h>
- #include <linux/time.h>
- #include "hpre.h"
- struct hpre_ctx;
- #define HPRE_CRYPTO_ALG_PRI 1000
- #define HPRE_ALIGN_SZ 64
- #define HPRE_BITS_2_BYTES_SHIFT 3
- #define HPRE_RSA_512BITS_KSZ 64
- #define HPRE_RSA_1536BITS_KSZ 192
- #define HPRE_CRT_PRMS 5
- #define HPRE_CRT_Q 2
- #define HPRE_CRT_P 3
- #define HPRE_CRT_INV 4
- #define HPRE_DH_G_FLAG 0x02
- #define HPRE_TRY_SEND_TIMES 100
- #define HPRE_INVLD_REQ_ID (-1)
- #define HPRE_SQE_ALG_BITS 5
- #define HPRE_SQE_DONE_SHIFT 30
- #define HPRE_DH_MAX_P_SZ 512
- #define HPRE_DFX_SEC_TO_US 1000000
- #define HPRE_DFX_US_TO_NS 1000
- /* due to nist p521 */
- #define HPRE_ECC_MAX_KSZ 66
- /* size in bytes of the n prime */
- #define HPRE_ECC_NIST_P192_N_SIZE 24
- #define HPRE_ECC_NIST_P256_N_SIZE 32
- #define HPRE_ECC_NIST_P384_N_SIZE 48
- /* size in bytes */
- #define HPRE_ECC_HW256_KSZ_B 32
- #define HPRE_ECC_HW384_KSZ_B 48
- /* capability register mask of driver */
- #define HPRE_DRV_RSA_MASK_CAP BIT(0)
- #define HPRE_DRV_DH_MASK_CAP BIT(1)
- #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
- #define HPRE_DRV_X25519_MASK_CAP BIT(5)
- typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
- struct hpre_rsa_ctx {
- /* low address: e--->n */
- char *pubkey;
- dma_addr_t dma_pubkey;
- /* low address: d--->n */
- char *prikey;
- dma_addr_t dma_prikey;
- /* low address: dq->dp->q->p->qinv */
- char *crt_prikey;
- dma_addr_t dma_crt_prikey;
- struct crypto_akcipher *soft_tfm;
- };
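- /*
- * Layout example (hypothetical 2048-bit key): key_sz is 256 bytes, so
- * pubkey spans 512 bytes with e right-aligned in [0, 256) and n in
- * [256, 512); prikey uses the same split with d in place of e.
- */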
- struct hpre_dh_ctx {
- /*
- * If base is g we compute the public key
- * ya = g^xa mod p; [RFC2631 sec 2.1.1]
- * else if base is the counterpart public key we
- * compute the shared secret
- * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
- * low address: d--->n, please refer to Hisilicon HPRE UM
- */
- char *xa_p;
- dma_addr_t dma_xa_p;
- char *g; /* m */
- dma_addr_t dma_g;
- };
- struct hpre_ecdh_ctx {
- /* low address: p->a->k->b */
- unsigned char *p;
- dma_addr_t dma_p;
- /* low address: x->y */
- unsigned char *g;
- dma_addr_t dma_g;
- };
- struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
- };
- struct hpre_ctx {
- struct hisi_qp *qp;
- struct device *dev;
- struct hpre_asym_request **req_list;
- struct hpre *hpre;
- spinlock_t req_lock;
- unsigned int key_sz;
- bool crt_g2_mode;
- struct idr req_idr;
- union {
- struct hpre_rsa_ctx rsa;
- struct hpre_dh_ctx dh;
- struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
- };
- /* for ecc algorithms */
- unsigned int curve_id;
- };
- struct hpre_asym_request {
- char *src;
- char *dst;
- struct hpre_sqe req;
- struct hpre_ctx *ctx;
- union {
- struct akcipher_request *rsa;
- struct kpp_request *dh;
- struct kpp_request *ecdh;
- struct kpp_request *curve25519;
- } areq;
- int err;
- int req_id;
- hpre_cb cb;
- struct timespec64 req_time;
- };
- static int hpre_alloc_req_id(struct hpre_ctx *ctx)
- {
- unsigned long flags;
- int id;
- spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
- return id;
- }
- static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
- {
- unsigned long flags;
- spin_lock_irqsave(&ctx->req_lock, flags);
- idr_remove(&ctx->req_idr, req_id);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
- }
- static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
- {
- struct hpre_ctx *ctx;
- struct hpre_dfx *dfx;
- int id;
- ctx = hpre_req->ctx;
- id = hpre_alloc_req_id(ctx);
- if (unlikely(id < 0))
- return -EINVAL;
- ctx->req_list[id] = hpre_req;
- hpre_req->req_id = id;
- dfx = ctx->hpre->debug.dfx;
- if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
- ktime_get_ts64(&hpre_req->req_time);
- return id;
- }
- static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
- {
- struct hpre_ctx *ctx = hpre_req->ctx;
- int id = hpre_req->req_id;
- if (hpre_req->req_id >= 0) {
- hpre_req->req_id = HPRE_INVLD_REQ_ID;
- ctx->req_list[id] = NULL;
- hpre_free_req_id(ctx, id);
- }
- }
- static struct hisi_qp *hpre_get_qp_and_start(u8 type)
- {
- struct hisi_qp *qp;
- int ret;
- qp = hpre_create_qp(type);
- if (!qp) {
- pr_err("Can not create hpre qp!\n");
- return ERR_PTR(-ENODEV);
- }
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0) {
- hisi_qm_free_qps(&qp, 1);
- pci_err(qp->qm->pdev, "Can not start qp!\n");
- return ERR_PTR(-EINVAL);
- }
- return qp;
- }
- static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
- {
- struct device *dev = hpre_req->ctx->dev;
- enum dma_data_direction dma_dir;
- if (is_src) {
- hpre_req->src = NULL;
- dma_dir = DMA_TO_DEVICE;
- } else {
- hpre_req->dst = NULL;
- dma_dir = DMA_FROM_DEVICE;
- }
- *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
- if (unlikely(dma_mapping_error(dev, *tmp))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- return 0;
- }
- static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
- {
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- void *ptr;
- int shift;
- shift = ctx->key_sz - len;
- if (unlikely(shift < 0))
- return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
- if (unlikely(!ptr))
- return -ENOMEM;
- if (is_src) {
- scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
- hpre_req->src = ptr;
- } else {
- hpre_req->dst = ptr;
- }
- return 0;
- }
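- /*
- * Example: with key_sz = 256 and a 100-byte source, shift = 156; the
- * data lands at ptr + 156, giving the hardware a right-aligned operand
- * whose high-order bytes stay zero (dma_alloc_coherent returns zeroed
- * memory).
- */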
- static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp = 0;
- int ret;
- /* DH source data always needs to be formatted into a new buffer */
- if ((sg_is_last(data) && len == ctx->key_sz) &&
- ((is_dh && !is_src) || !is_dh))
- ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
- else
- ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
- if (unlikely(ret))
- return ret;
- if (is_src)
- msg->in = cpu_to_le64(tmp);
- else
- msg->out = cpu_to_le64(tmp);
- return 0;
- }
- static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t tmp;
- tmp = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, tmp)))
- return;
- if (src) {
- if (req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
- else
- dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
- }
- tmp = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, tmp)))
- return;
- if (req->dst) {
- if (dst)
- scatterwalk_map_and_copy(req->dst, dst, 0,
- ctx->key_sz, 1);
- dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
- } else {
- dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
- }
- }
- static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
- {
- struct hpre_asym_request *req;
- unsigned int err, done, alg;
- int id;
- #define HPRE_NO_HW_ERR 0
- #define HPRE_HW_TASK_DONE 3
- #define HPRE_HW_ERR_MASK GENMASK(10, 0)
- #define HPRE_SQE_DONE_MASK GENMASK(1, 0)
- #define HPRE_ALG_TYPE_MASK GENMASK(4, 0)
- id = (int)le16_to_cpu(sqe->tag);
- req = ctx->req_list[id];
- hpre_rm_req_from_ctx(req);
- *kreq = req;
- err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
- HPRE_HW_ERR_MASK;
- done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
- HPRE_SQE_DONE_MASK;
- if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
- return 0;
- alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
- dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
- alg, done, err);
- return -EINVAL;
- }
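- /*
- * dw0 layout implied by the masks above: bits [4:0] carry the algorithm
- * type, bits [15:5] the hardware error code, and bits [31:30] the done
- * flags; a good completion reads err == 0 and done == 3.
- */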
- static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
- {
- struct hpre *hpre;
- if (!ctx || !qp || qlen < 0)
- return -EINVAL;
- spin_lock_init(&ctx->req_lock);
- ctx->qp = qp;
- ctx->dev = &qp->qm->pdev->dev;
- hpre = container_of(ctx->qp->qm, struct hpre, qm);
- ctx->hpre = hpre;
- ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
- if (!ctx->req_list)
- return -ENOMEM;
- ctx->key_sz = 0;
- ctx->crt_g2_mode = false;
- idr_init(&ctx->req_idr);
- return 0;
- }
- static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
- {
- if (is_clear_all) {
- idr_destroy(&ctx->req_idr);
- kfree(ctx->req_list);
- hisi_qm_free_qps(&ctx->qp, 1);
- }
- ctx->crt_g2_mode = false;
- ctx->key_sz = 0;
- }
- static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
- u64 overtime_thrhld)
- {
- struct timespec64 reply_time;
- u64 time_use_us;
- ktime_get_ts64(&reply_time);
- time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
- HPRE_DFX_SEC_TO_US +
- (reply_time.tv_nsec - req->req_time.tv_nsec) /
- HPRE_DFX_US_TO_NS;
- if (time_use_us <= overtime_thrhld)
- return false;
- return true;
- }
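- /*
- * Example: a request stamped at 5.000000000 s and answered at
- * 5.000150000 s yields time_use_us = 0 * 1000000 + 150000 / 1000 = 150,
- * which counts as a timeout only when it exceeds overtime_thrhld.
- */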
- static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.dh;
- areq->dst_len = ctx->key_sz;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req;
- struct akcipher_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- areq = req->areq.rsa;
- areq->dst_len = ctx->key_sz;
- hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- akcipher_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
- {
- struct hpre_ctx *ctx = qp->qp_ctx;
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_sqe *sqe = resp;
- struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- if (unlikely(!req)) {
- atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
- return;
- }
- req->cb(ctx, resp);
- }
- static void hpre_stop_qp_and_put(struct hisi_qp *qp)
- {
- hisi_qm_stop_qp(qp);
- hisi_qm_free_qps(&qp, 1);
- }
- static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
- {
- struct hisi_qp *qp;
- int ret;
- qp = hpre_get_qp_and_start(type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
- qp->qp_ctx = ctx;
- qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
- if (ret)
- hpre_stop_qp_and_put(qp);
- return ret;
- }
- static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (is_rsa) {
- struct akcipher_request *akreq = req;
- if (akreq->dst_len < ctx->key_sz) {
- akreq->dst_len = ctx->key_sz;
- return -EOVERFLOW;
- }
- tmp = akcipher_request_ctx(akreq);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- h_req->cb = hpre_rsa_cb;
- h_req->areq.rsa = akreq;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- } else {
- struct kpp_request *kreq = req;
- if (kreq->dst_len < ctx->key_sz) {
- kreq->dst_len = ctx->key_sz;
- return -EOVERFLOW;
- }
- tmp = kpp_request_ctx(kreq);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- h_req->cb = hpre_dh_cb;
- h_req->areq.dh = kreq;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
- }
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- int ctr = 0;
- int ret;
- do {
- atomic64_inc(&dfx[HPRE_SEND_CNT].value);
- ret = hisi_qp_send(ctx->qp, msg);
- if (ret != -EBUSY)
- break;
- atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
- } while (ctr++ < HPRE_TRY_SEND_TIMES);
- if (likely(!ret))
- return ret;
- if (ret != -EBUSY)
- atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
- return ret;
- }
- static int hpre_dh_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_msg_request_set(ctx, req, false);
- if (unlikely(ret))
- return ret;
- if (req->src) {
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (unlikely(ret))
- goto clear_all;
- } else {
- msg->in = cpu_to_le64(ctx->dh.dma_g);
- }
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (unlikely(ret))
- goto clear_all;
- if (ctx->crt_g2_mode && !req->src)
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
- else
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_is_dh_params_length_valid(unsigned int key_sz)
- {
- #define _HPRE_DH_GRP1 768
- #define _HPRE_DH_GRP2 1024
- #define _HPRE_DH_GRP5 1536
- #define _HPRE_DH_GRP14 2048
- #define _HPRE_DH_GRP15 3072
- #define _HPRE_DH_GRP16 4096
- switch (key_sz) {
- case _HPRE_DH_GRP1:
- case _HPRE_DH_GRP2:
- case _HPRE_DH_GRP5:
- case _HPRE_DH_GRP14:
- case _HPRE_DH_GRP15:
- case _HPRE_DH_GRP16:
- return 0;
- default:
- return -EINVAL;
- }
- }
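- /*
- * Callers pass a bit count, e.g. a 256-byte prime gives
- * 256 << HPRE_BITS_2_BYTES_SHIFT = 2048 bits, matching group 14.
- */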
- static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
- {
- struct device *dev = ctx->dev;
- unsigned int sz;
- if (params->p_size > HPRE_DH_MAX_P_SZ)
- return -EINVAL;
- if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
- return -EINVAL;
- sz = ctx->key_sz = params->p_size;
- ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
- if (!ctx->dh.xa_p)
- return -ENOMEM;
- memcpy(ctx->dh.xa_p + sz, params->p, sz);
- /* If g equals 2 don't copy it */
- if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
- ctx->crt_g2_mode = true;
- return 0;
- }
- ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
- if (!ctx->dh.g) {
- dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
- ctx->dh.dma_xa_p);
- ctx->dh.xa_p = NULL;
- return -ENOMEM;
- }
- memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
- return 0;
- }
- static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (ctx->dh.g) {
- dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
- ctx->dh.g = NULL;
- }
- if (ctx->dh.xa_p) {
- memzero_explicit(ctx->dh.xa_p, sz);
- dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
- ctx->dh.dma_xa_p);
- ctx->dh.xa_p = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct dh params;
- int ret;
- if (crypto_dh_decode_key(buf, len, &params) < 0)
- return -EINVAL;
- /* Free old secret if any */
- hpre_dh_clear_ctx(ctx, false);
- ret = hpre_dh_set_params(ctx, &params);
- if (ret < 0)
- goto err_clear_ctx;
- memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
- params.key_size);
- return 0;
- err_clear_ctx:
- hpre_dh_clear_ctx(ctx, false);
- return ret;
- }
- static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return ctx->key_sz;
- }
- static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
- }
- static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_dh_clear_ctx(ctx, true);
- }
- static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
- {
- while (!**ptr && *len) {
- (*ptr)++;
- (*len)--;
- }
- }
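- /*
- * Example: { 0x00, 0x00, 0x01, 0x23 } with len 4 advances ptr by two
- * and drops len to 2, leaving the minimal big-endian value { 0x01, 0x23 }.
- */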
- static bool hpre_rsa_key_size_is_support(unsigned int len)
- {
- unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
- #define _RSA_1024BITS_KEY_WDTH 1024
- #define _RSA_2048BITS_KEY_WDTH 2048
- #define _RSA_3072BITS_KEY_WDTH 3072
- #define _RSA_4096BITS_KEY_WDTH 4096
- switch (bits) {
- case _RSA_1024BITS_KEY_WDTH:
- case _RSA_2048BITS_KEY_WDTH:
- case _RSA_3072BITS_KEY_WDTH:
- case _RSA_4096BITS_KEY_WDTH:
- return true;
- default:
- return false;
- }
- }
- static int hpre_rsa_enc(struct akcipher_request *req)
- {
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
- akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
- ret = crypto_akcipher_encrypt(req);
- akcipher_request_set_tfm(req, tfm);
- return ret;
- }
- if (unlikely(!ctx->rsa.pubkey))
- return -EINVAL;
- ret = hpre_msg_request_set(ctx, req, true);
- if (unlikely(ret))
- return ret;
- msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
- msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (unlikely(ret))
- goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (unlikely(ret))
- goto clear_all;
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_rsa_dec(struct akcipher_request *req)
- {
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
- akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
- ret = crypto_akcipher_decrypt(req);
- akcipher_request_set_tfm(req, tfm);
- return ret;
- }
- if (unlikely(!ctx->rsa.prikey))
- return -EINVAL;
- ret = hpre_msg_request_set(ctx, req, true);
- if (unlikely(ret))
- return ret;
- if (ctx->crt_g2_mode) {
- msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
- HPRE_ALG_NC_CRT);
- } else {
- msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
- HPRE_ALG_NC_NCRT);
- }
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (unlikely(ret))
- goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (unlikely(ret))
- goto clear_all;
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
- size_t vlen, bool private)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- ctx->key_sz = vlen;
- /* if an unsupported key size is provided, fall back to the software tfm */
- if (!hpre_rsa_key_size_is_support(ctx->key_sz))
- return 0;
- ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
- &ctx->rsa.dma_pubkey,
- GFP_KERNEL);
- if (!ctx->rsa.pubkey)
- return -ENOMEM;
- if (private) {
- ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
- &ctx->rsa.dma_prikey,
- GFP_KERNEL);
- if (!ctx->rsa.prikey) {
- dma_free_coherent(ctx->dev, vlen << 1,
- ctx->rsa.pubkey,
- ctx->rsa.dma_pubkey);
- ctx->rsa.pubkey = NULL;
- return -ENOMEM;
- }
- memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
- }
- memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
- /* Using hardware HPRE to do RSA */
- return 1;
- }
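- /*
- * On the hardware path, n now occupies the upper half of the key
- * buffers: pubkey[vlen, 2 * vlen) and, for private keys,
- * prikey[vlen, 2 * vlen); e and d are filled in later by the setters
- * below, right-aligned within the lower half.
- */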
- static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
- size_t vlen)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
- return -EINVAL;
- memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
- return 0;
- }
- static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
- size_t vlen)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
- return -EINVAL;
- memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
- return 0;
- }
- static int hpre_crt_para_get(char *para, size_t para_sz,
- const char *raw, size_t raw_sz)
- {
- const char *ptr = raw;
- size_t len = raw_sz;
- hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_sz)
- return -EINVAL;
- memcpy(para + para_sz - len, ptr, len);
- return 0;
- }
- static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
- {
- unsigned int hlf_ksz = ctx->key_sz >> 1;
- struct device *dev = ctx->dev;
- u64 offset;
- int ret;
- ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
- &ctx->rsa.dma_crt_prikey,
- GFP_KERNEL);
- if (!ctx->rsa.crt_prikey)
- return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
- rsa_key->dq, rsa_key->dq_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->dp, rsa_key->dp_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->q, rsa_key->q_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->p, rsa_key->p_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->qinv, rsa_key->qinv_sz);
- if (ret)
- goto free_key;
- ctx->crt_g2_mode = true;
- return 0;
- free_key:
- offset = hlf_ksz * HPRE_CRT_PRMS;
- memzero_explicit(ctx->rsa.crt_prikey, offset);
- dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
- ctx->rsa.dma_crt_prikey);
- ctx->rsa.crt_prikey = NULL;
- ctx->crt_g2_mode = false;
- return ret;
- }
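- /*
- * Resulting crt_prikey layout, one half-key-size slot per parameter:
- * [dq | dp | q | p | qinv] at offsets 0/1/2/3/4 * hlf_ksz, matching the
- * HPRE_CRT_Q/P/INV indexes defined above.
- */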
- /* If is_clear_all is set, all resources held by the QP are released. */
- static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
- {
- unsigned int half_key_sz = ctx->key_sz >> 1;
- struct device *dev = ctx->dev;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (ctx->rsa.pubkey) {
- dma_free_coherent(dev, ctx->key_sz << 1,
- ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
- ctx->rsa.pubkey = NULL;
- }
- if (ctx->rsa.crt_prikey) {
- memzero_explicit(ctx->rsa.crt_prikey,
- half_key_sz * HPRE_CRT_PRMS);
- dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
- ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
- ctx->rsa.crt_prikey = NULL;
- }
- if (ctx->rsa.prikey) {
- memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
- ctx->rsa.dma_prikey);
- ctx->rsa.prikey = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- /*
- * Judge whether the key is in CRT form:
- * CRT: return true, N-CRT: return false.
- */
- static bool hpre_is_crt_key(struct rsa_key *key)
- {
- u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
- key->qinv_sz;
- #define LEN_OF_NCRT_PARA 5
- /* an N-CRT key lacks the 5 CRT parameters, so the summed length stays at or below LEN_OF_NCRT_PARA */
- return len > LEN_OF_NCRT_PARA;
- }
- static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
- unsigned int keylen, bool private)
- {
- struct rsa_key rsa_key;
- int ret;
- hpre_rsa_clear_ctx(ctx, false);
- if (private)
- ret = rsa_parse_priv_key(&rsa_key, key, keylen);
- else
- ret = rsa_parse_pub_key(&rsa_key, key, keylen);
- if (ret < 0)
- return ret;
- ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
- if (ret <= 0)
- return ret;
- if (private) {
- ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
- if (ret < 0)
- goto free;
- if (hpre_is_crt_key(&rsa_key)) {
- ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
- if (ret < 0)
- goto free;
- }
- }
- ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
- if (ret < 0)
- goto free;
- if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
- ret = -EINVAL;
- goto free;
- }
- return 0;
- free:
- hpre_rsa_clear_ctx(ctx, false);
- return ret;
- }
- static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
- if (ret)
- return ret;
- return hpre_rsa_setkey(ctx, key, keylen, false);
- }
- static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
- if (ret)
- return ret;
- return hpre_rsa_setkey(ctx, key, keylen, true);
- }
- static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- /* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
- return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
- return ctx->key_sz;
- }
- static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
- if (IS_ERR(ctx->rsa.soft_tfm)) {
- pr_err("Can not alloc_akcipher!\n");
- return PTR_ERR(ctx->rsa.soft_tfm);
- }
- ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
- if (ret)
- crypto_free_akcipher(ctx->rsa.soft_tfm);
- return ret;
- }
- static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- hpre_rsa_clear_ctx(ctx, true);
- crypto_free_akcipher(ctx->rsa.soft_tfm);
- }
- static void hpre_key_to_big_end(u8 *data, int len)
- {
- int i, j;
- for (i = 0; i < len / 2; i++) {
- j = len - i - 1;
- swap(data[j], data[i]);
- }
- }
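- /*
- * Example: { 0x01, 0x02, 0x03, 0x04 } becomes { 0x04, 0x03, 0x02, 0x01 },
- * an in-place reversal between little- and big-endian byte order.
- */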
- static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
- /* ecdh: p->a->k->b */
- memzero_explicit(ctx->ecdh.p + shift, sz);
- dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
- ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- /*
- * HPRE supports curves of 192/224/256/384/521 bits and rounds the key
- * width up to a hardware size:
- * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
- * If the parameter is narrower than the hardware size, the high-order
- * bytes are zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
- */
- static unsigned int hpre_ecdh_supported_curve(unsigned short id)
- {
- switch (id) {
- case ECC_CURVE_NIST_P192:
- case ECC_CURVE_NIST_P256:
- return HPRE_ECC_HW256_KSZ_B;
- case ECC_CURVE_NIST_P384:
- return HPRE_ECC_HW384_KSZ_B;
- default:
- break;
- }
- return 0;
- }
- static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
- {
- unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
- u8 i = 0;
- while (i < ndigits - 1) {
- memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
- i++;
- }
- memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
- hpre_key_to_big_end((u8 *)addr, cur_sz);
- }
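- /*
- * Example: for NIST P-192, cur_sz = 24 and ndigits = 3, so two full u64
- * digits are copied, the last digit contributes the remaining
- * sz = 24 - 2 * 8 = 8 bytes, and the buffer is then byte-reversed into
- * big-endian form for the hardware.
- */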
- static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
- unsigned int cur_sz)
- {
- unsigned int shifta = ctx->key_sz << 1;
- unsigned int shiftb = ctx->key_sz << 2;
- void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
- void *a = ctx->ecdh.p + shifta - cur_sz;
- void *b = ctx->ecdh.p + shiftb - cur_sz;
- void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
- void *y = ctx->ecdh.g + shifta - cur_sz;
- const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
- char *n;
- if (unlikely(!curve))
- return -EINVAL;
- n = kzalloc(ctx->key_sz, GFP_KERNEL);
- if (!n)
- return -ENOMEM;
- fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
- fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
- fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
- fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
- fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
- fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
- if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
- kfree(n);
- return -EINVAL;
- }
- kfree(n);
- return 0;
- }
- static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
- {
- switch (id) {
- case ECC_CURVE_NIST_P192:
- return HPRE_ECC_NIST_P192_N_SIZE;
- case ECC_CURVE_NIST_P256:
- return HPRE_ECC_NIST_P256_N_SIZE;
- case ECC_CURVE_NIST_P384:
- return HPRE_ECC_NIST_P384_N_SIZE;
- default:
- break;
- }
- return 0;
- }
- static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
- {
- struct device *dev = ctx->dev;
- unsigned int sz, shift, curve_sz;
- int ret;
- ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
- if (!ctx->key_sz)
- return -EINVAL;
- curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
- if (!curve_sz || params->key_size > curve_sz)
- return -EINVAL;
- sz = ctx->key_sz;
- if (!ctx->ecdh.p) {
- ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
- GFP_KERNEL);
- if (!ctx->ecdh.p)
- return -ENOMEM;
- }
- shift = sz << 2;
- ctx->ecdh.g = ctx->ecdh.p + shift;
- ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
- ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
- if (ret) {
- dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
- dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
- ctx->ecdh.p = NULL;
- return ret;
- }
- return 0;
- }
- static bool hpre_key_is_zero(char *key, unsigned short key_sz)
- {
- int i;
- for (i = 0; i < key_sz; i++)
- if (key[i])
- return false;
- return true;
- }
- static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
- {
- struct device *dev = ctx->dev;
- int ret;
- ret = crypto_get_default_rng();
- if (ret) {
- dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
- return ret;
- }
- ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
- params->key_size);
- crypto_put_default_rng();
- if (ret)
- dev_err(dev, "failed to get rng, ret = %d!\n", ret);
- return ret;
- }
- static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- char key[HPRE_ECC_MAX_KSZ];
- unsigned int sz, sz_shift;
- struct ecdh params;
- int ret;
- if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
- dev_err(dev, "failed to decode ecdh key!\n");
- return -EINVAL;
- }
- /* Use stdrng to generate private key */
- if (!params.key || !params.key_size) {
- params.key = key;
- params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
- ret = ecdh_gen_privkey(ctx, &params);
- if (ret)
- return ret;
- }
- if (hpre_key_is_zero(params.key, params.key_size)) {
- dev_err(dev, "Invalid hpre key!\n");
- return -EINVAL;
- }
- hpre_ecc_clear_ctx(ctx, false, true);
- ret = hpre_ecdh_set_param(ctx, &params);
- if (ret < 0) {
- dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
- return ret;
- }
- sz = ctx->key_sz;
- sz_shift = (sz << 1) + sz - params.key_size;
- memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
- return 0;
- }
- static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
- }
- static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
- {
- unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- char *p;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.ecdh;
- areq->dst_len = ctx->key_sz << 1;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- p = sg_virt(areq->dst);
- memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
- memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (req->dst_len < ctx->key_sz << 1) {
- req->dst_len = ctx->key_sz << 1;
- return -EINVAL;
- }
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- h_req->cb = hpre_ecdh_cb;
- h_req->areq.ecdh = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->ecdh.dma_p);
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- unsigned int tmpshift;
- dma_addr_t dma = 0;
- void *ptr;
- int shift;
- /* Src_data includes gx and gy. */
- shift = ctx->key_sz - (len >> 1);
- if (unlikely(shift < 0))
- return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
- tmpshift = ctx->key_sz << 1;
- scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
- memcpy(ptr + shift, ptr + tmpshift, len >> 1);
- memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
- }
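- /*
- * Example: for P-256, key_sz = 32 and a 64-byte (gx, gy) source is
- * staged at ptr + 64, then gx is right-aligned into [shift, 32) and gy
- * into [32 + shift, 64), preserving the zero padding the hardware
- * expects.
- */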
- static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
- if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- msg->out = cpu_to_le64(dma);
- return 0;
- }
- static int hpre_ecdh_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_ecdh_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
- return ret;
- }
- if (req->src) {
- ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n", ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->ecdh.dma_g);
- }
- ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- /* max size is the pub_key_size, including x and y */
- return ctx->key_sz << 1;
- }
- static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P192;
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P256;
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P384;
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
- }
- static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
- {
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only then does the result match the
- * software curve25519 implementation in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
- p = ctx->curve25519.p + sz - len;
- curve = ecc_get_curve25519();
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
- }
- static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
- hpre_curve25519_fill_curve(ctx, buf, len);
- return 0;
- }
- static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
- return 0;
- }
- static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
- }
- static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static void hpre_curve25519_src_modulo_p(u8 *ptr)
- {
- int i;
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
- /* The residue is ptr's last byte minus 0xed (the last byte of p) */
- ptr[i] -= 0xed;
- }
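- /*
- * This works because any big-endian value in (p, 2^255 - 1] shares p's
- * leading bytes (0x7f, 0xff, ..., 0xff), so subtracting p only changes
- * the final byte: the residue is that byte minus 0xed, a value in [1, 18].
- */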
- static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
- /*
- * Src_data (gx) is in little-endian order; the MSB of its final byte
- * must be masked as described in RFC 7748, and the data then converted
- * to big-endian form before hisi_hpre can use it.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
- curve = ecc_get_curve25519();
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
- /*
- * When src_data lies in [2^255 - 19, 2^255 - 1] it is not less than p,
- * so reduce it modulo p before use.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
- err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
- }
- static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- msg->out = cpu_to_le64(dma);
- return 0;
- }
- static int hpre_curve25519_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return ctx->key_sz;
- }
- static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, false);
- }
- static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
- .encrypt = hpre_rsa_enc,
- .decrypt = hpre_rsa_dec,
- .set_pub_key = hpre_rsa_setpubkey,
- .set_priv_key = hpre_rsa_setprivkey,
- .max_size = hpre_rsa_max_size,
- .init = hpre_rsa_init_tfm,
- .exit = hpre_rsa_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "rsa",
- .cra_driver_name = "hpre-rsa",
- .cra_module = THIS_MODULE,
- },
- };
- static struct kpp_alg dh = {
- .set_secret = hpre_dh_set_secret,
- .generate_public_key = hpre_dh_compute_value,
- .compute_shared_secret = hpre_dh_compute_value,
- .max_size = hpre_dh_max_size,
- .init = hpre_dh_init_tfm,
- .exit = hpre_dh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "dh",
- .cra_driver_name = "hpre-dh",
- .cra_module = THIS_MODULE,
- },
- };
- static struct kpp_alg ecdh_curves[] = {
- {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
- }, {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
- }, {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
- }
- };
- static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
- };
- static int hpre_register_rsa(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
- return 0;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_rsa(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
- return;
- crypto_unregister_akcipher(&rsa);
- }
- static int hpre_register_dh(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
- return 0;
- ret = crypto_register_kpp(&dh);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_dh(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
- return;
- crypto_unregister_kpp(&dh);
- }
- static int hpre_register_ecdh(struct hisi_qm *qm)
- {
- int ret, i;
- if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
- return 0;
- for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
- ret = crypto_register_kpp(&ecdh_curves[i]);
- if (ret) {
- dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
- ecdh_curves[i].base.cra_name, ret);
- goto unreg_kpp;
- }
- }
- return 0;
- unreg_kpp:
- for (--i; i >= 0; --i)
- crypto_unregister_kpp(&ecdh_curves[i]);
- return ret;
- }
- static void hpre_unregister_ecdh(struct hisi_qm *qm)
- {
- int i;
- if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
- return;
- for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
- crypto_unregister_kpp(&ecdh_curves[i]);
- }
- static int hpre_register_x25519(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_x25519(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
- crypto_unregister_kpp(&curve25519_alg);
- }
- int hpre_algs_register(struct hisi_qm *qm)
- {
- int ret;
- ret = hpre_register_rsa(qm);
- if (ret)
- return ret;
- ret = hpre_register_dh(qm);
- if (ret)
- goto unreg_rsa;
- ret = hpre_register_ecdh(qm);
- if (ret)
- goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
- return ret;
- unreg_ecdh:
- hpre_unregister_ecdh(qm);
- unreg_dh:
- hpre_unregister_dh(qm);
- unreg_rsa:
- hpre_unregister_rsa(qm);
- return ret;
- }
- void hpre_algs_unregister(struct hisi_qm *qm)
- {
- hpre_unregister_x25519(qm);
- hpre_unregister_ecdh(qm);
- hpre_unregister_dh(qm);
- hpre_unregister_rsa(qm);
- }