// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <[email protected]>
 *
 * This file adds support for the AES cipher with 128, 192 and 256-bit
 * keysizes in CBC and ECB mode, and for DES and 3DES in CBC and ECB mode.
 *
 * The datasheet can be found in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
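
/*
 * sun4i_ss_opti_poll() is the optimized path: it is only called for
 * requests whose source and destination SG entries are all 4-byte
 * aligned in both offset and length, so data can be pushed to the RX
 * FIFO and drained from the TX FIFO one 32-bit word at a time without
 * any intermediate buffering.
 */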
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when the SS is enabled, the default RX FIFO space is SS_RX_DEFAULT (32 words) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}
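
	/*
	 * For CBC decryption, save the last ciphertext block now: it
	 * becomes the next IV, and it would be lost if the destination
	 * buffer overlaps the source.
	 */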
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_opti++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
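
	/* writing the mode word (which includes SS_ENABLED) to SS_CTL starts the engine */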
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}
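
		/*
		 * SS_FCSR reports how many words the RX FIFO can still
		 * accept and how many are ready to be read from the TX
		 * FIFO; refresh both counters before draining output.
		 */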
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);
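
	/*
	 * Chained requests need the IV updated: restore the saved
	 * ciphertext block on decryption, or copy out the last
	 * ciphertext block on encryption.
	 */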
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
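
/*
 * Hand the whole request over to the software fallback tfm, keeping
 * the caller's flags and completion callback intact.
 */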
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
		algt->stat_fb++;
	}

	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);
	return err;
}
/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when the SS is enabled, the default RX FIFO space is SS_RX_DEFAULT (32 words) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo; /* offset for in and out */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * if we only have SGs whose sizes and offsets are a multiple
	 * of 4, we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}
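
	/*
	 * Fully aligned requests take the optimized path; requests
	 * whose length is not a multiple of the block size must go to
	 * the software fallback.
	 */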
	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);
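
	/*
	 * as in sun4i_ss_opti_poll(), save the last ciphertext block:
	 * for CBC decryption it becomes the next IV
	 */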
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
					 areq->cryptlen - ivsize, ivsize, 0);
	}

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt->stat_req++;
		algt->stat_bytes += areq->cryptlen;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; todo is in bytes. After the
				 * copy, once we have a multiple of 4 bytes we
				 * must be able to write all of buf in one
				 * pass, which is why we min() against rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
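
		/* if no output word is ready yet, keep feeding the RX FIFO */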
		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as
			 * possible to drain the device.
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * what remains in the current SG entry and no
				 * more than what remains in bufo; no need to
				 * test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully consumed here */
		}
		sg_miter_stop(&mo);
	}
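
	/* update the IV for chaining, as in sun4i_ss_opti_poll() */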
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, ctx->backup_iv, ivsize);
			memzero_explicit(ctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
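
/*
 * The helpers below just build the SS_CTL mode word (algorithm,
 * chaining mode, direction and key size) and delegate to
 * sun4i_ss_cipher_poll().
 */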
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
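
/*
 * Allocate a software fallback for the algorithm and size our request
 * context so it can embed the fallback's own request.
 */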
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}
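
/* release the fallback tfm and drop the runtime PM reference */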
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}