Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Add support for AEAD in simd
   - Add fuzz testing to testmgr
   - Add panic_on_fail module parameter to testmgr
   - Use per-CPU struct instead multiple variables in scompress
   - Change verify API for akcipher

  Algorithms:
   - Convert x86 AEAD algorithms over to simd
   - Forbid 2-key 3DES in FIPS mode
   - Add EC-RDSA (GOST 34.10) algorithm

  Drivers:
   - Set output IV with ctr-aes in crypto4xx
   - Set output IV in rockchip
   - Fix potential length overflow with hashing in sun4i-ss
   - Fix computation error with ctr in vmx
   - Add SM4 protected keys support in ccree
   - Remove long-broken mxc-scc driver
   - Add rfc4106(gcm(aes)) cipher support in cavium/nitrox"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (179 commits)
  crypto: ccree - use a proper le32 type for le32 val
  crypto: ccree - remove set but not used variable 'du_size'
  crypto: ccree - Make cc_sec_disable static
  crypto: ccree - fix spelling mistake "protedcted" -> "protected"
  crypto: caam/qi2 - generate hash keys in-place
  crypto: caam/qi2 - fix DMA mapping of stack memory
  crypto: caam/qi2 - fix zero-length buffer DMA mapping
  crypto: stm32/cryp - update to return iv_out
  crypto: stm32/cryp - remove request mutex protection
  crypto: stm32/cryp - add weak key check for DES
  crypto: atmel - remove set but not used variable 'alg_name'
  crypto: picoxcell - Use dev_get_drvdata()
  crypto: crypto4xx - get rid of redundant using_sd variable
  crypto: crypto4xx - use sync skcipher for fallback
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  crypto: ecrdsa - select ASN1 and OID_REGISTRY for EC-RDSA
  crypto: ux500 - use ccflags-y instead of CFLAGS_<basename>.o
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  ...
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.
 
-config CRYPTO_DEV_MXC_SCC
-	tristate "Support for Freescale Security Controller (SCC)"
-	depends on ARCH_MXC && OF
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
-	help
-	  This option enables support for the Security Controller (SCC)
-	  found in Freescale i.MX25 chips.
-
 config CRYPTO_DEV_EXYNOS_RNG
 	tristate "EXYNOS HW pseudo random number generator support"
 	depends on ARCH_EXYNOS || COMPILE_TEST

@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/

@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	/* Setup SA */
 	sa = ctx->sa_in;
 
-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
-				 SA_SAVE_IV : SA_NOT_SAVE_IV),
-				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_NOT_SAVE_IV : SA_SAVE_IV),
+				 SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
 				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
 				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
 				 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
 	sa = ctx->sa_out;
 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	/*
+	 * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+	 * it's the DIR_(IN|OUT)BOUND that matters
+	 */
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
 
 	return 0;
 }
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
 	 * overlow.
 	 */
 	if (counter + nblks < counter) {
-		struct skcipher_request *subreq = skcipher_request_ctx(req);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
 		int ret;
 
-		skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+		skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
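For context, the sync-skcipher fallback pattern this hunk converts to looks roughly as follows. This is a minimal sketch, not code from the driver; the function name and the assumption that "tfm" was allocated with crypto_alloc_sync_skcipher() are illustrative:

	static int fallback_encrypt(struct crypto_sync_skcipher *tfm,
				    struct skcipher_request *req)
	{
		/* an on-stack request is safe: a sync tfm has a fixed,
		 * known reqsize */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tfm);
		int ret;

		skcipher_request_set_sync_tfm(subreq, tfm);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

This is also why the crypto_skcipher_set_reqsize() call disappears in the crypto4xx_sk_init() hunk further down: with the request on the stack, no per-request fallback context needs to be reserved.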
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
 {
 	int rc;
 
-	crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+	crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
 				    CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+	crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
 		crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
-	rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+	rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
 	crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
 	crypto_skcipher_set_flags(cipher,
-		crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+		crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
 		CRYPTO_TFM_RES_MASK);
 
 	return rc;

@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
 
 	req = skcipher_request_cast(pd_uinfo->async_req);
 
-	if (pd_uinfo->using_sd) {
+	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
 					  req->cryptlen, req->dst);
 	} else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
 	u32 icv[AES_BLOCK_SIZE];
 	int err = 0;
 
-	if (pd_uinfo->using_sd) {
+	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
 					  pd->pd_ctl_len.bf.pkt_len,
 					  dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	size_t offset_to_sr_ptr;
 	u32 gd_idx = 0;
 	int tmp;
-	bool is_busy;
+	bool is_busy, force_sd;
+
+	/*
+	 * There's a very subtile/disguised "bug" in the hardware that
+	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+	 * of the hardware spec:
+	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+	 * operation modes for >>> "Block ciphers" <<<.
+	 *
+	 * To workaround this issue and stop the hardware from causing
+	 * "overran dst buffer" on crypttexts that are not a multiple
+	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
+	 * scatter buffers.
+	 */
+	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+		&& (datalen % AES_BLOCK_SIZE);
 
 	/* figure how many gd are needed */
 	tmp = sg_nents_for_len(src, assoclen + datalen);
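As a worked example of the predicate above (illustrative only): an AES-CFB request with datalen = 20 gives 20 % AES_BLOCK_SIZE = 4, so force_sd is true and the scatter path is taken; an AES-CBC request of any length, or a CFB/OFB request that is a multiple of 16 bytes, leaves force_sd false. A hypothetical helper mirroring the computation:

	/* hypothetical helper, not part of the patch */
	static bool needs_scatter_workaround(u32 mode, unsigned int datalen)
	{
		return (mode == CRYPTO_MODE_CFB || mode == CRYPTO_MODE_OFB) &&
		       (datalen % AES_BLOCK_SIZE);
	}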
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	}
 
 	/* figure how many sd are needed */
-	if (sg_is_last(dst)) {
+	if (sg_is_last(dst) && force_sd == false) {
 		num_sd = 0;
 	} else {
 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	pd->sa_len = sa_len;
 
 	pd_uinfo = &dev->pdr_uinfo[pd_entry];
-	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;
+	pd_uinfo->dest_va = dst;
+	pd_uinfo->async_req = req;
 
 	if (iv_len)
 		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		/* get first gd we are going to use */
 		gd_idx = fst_gd;
 		pd_uinfo->first_gd = fst_gd;
-		pd_uinfo->num_gd = num_gd;
 		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 		pd->src = gd_dma;
 		/* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		 * Indicate gather array is not used
 		 */
 		pd_uinfo->first_gd = 0xffffffff;
 		pd_uinfo->num_gd = 0;
 	}
-	if (sg_is_last(dst)) {
+	if (!num_sd) {
 		/*
 		 * we know application give us dst a whole piece of memory
 		 * no need to use scatter ring.
 		 */
-		pd_uinfo->using_sd = 0;
-		pd_uinfo->first_sd = 0xffffffff;
-		pd_uinfo->num_sd = 0;
-		pd_uinfo->dest_va = dst;
 		sa->sa_command_0.bf.scatter = 0;
 		pd->dest = (u32)dma_map_page(dev->core_dev->device,
 					     sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		u32 sd_idx = fst_sd;
 		nbytes = datalen;
 		sa->sa_command_0.bf.scatter = 1;
-		pd_uinfo->using_sd = 1;
-		pd_uinfo->dest_va = dst;
 		pd_uinfo->first_sd = fst_sd;
-		pd_uinfo->num_sd = num_sd;
 		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 		pd->dest = sd_dma;
 		/* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
 
 	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
 		ctx->sw_cipher.cipher =
-			crypto_alloc_skcipher(alg->base.cra_name, 0,
-					      CRYPTO_ALG_NEED_FALLBACK |
-					      CRYPTO_ALG_ASYNC);
+			crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+						   CRYPTO_ALG_NEED_FALLBACK);
 		if (IS_ERR(ctx->sw_cipher.cipher))
 			return PTR_ERR(ctx->sw_cipher.cipher);
-
-		crypto_skcipher_set_reqsize(sk,
-			sizeof(struct skcipher_request) + 32 +
-			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
 	}
 
 	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
 
 	crypto4xx_common_exit(ctx);
 	if (ctx->sw_cipher.cipher)
-		crypto_free_skcipher(ctx->sw_cipher.cipher);
+		crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
 }
 
 static int crypto4xx_aead_init(struct crypto_aead *tfm)

@@ -64,7 +64,6 @@ union shadow_sa_buf {
 struct pd_uinfo {
 	struct crypto4xx_device *dev;
 	u32 state;
-	u32 using_sd;
 	u32 first_gd;	/* first gather discriptor
 				used by this packet */
 	u32 num_gd;	/* number of gather discriptor
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
 	__le32 iv_nonce;
 	u32 sa_len;
 	union {
-		struct crypto_skcipher *cipher;
+		struct crypto_sync_skcipher *cipher;
 		struct crypto_aead *aead;
 	} sw_cipher;
 };

@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	const char *alg_name;
+	u32 flags;
+	int err;
 
-	alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
-
-	/*
-	 * HW bug in cfb 3-keys mode.
-	 */
-	if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
-			&& (keylen != 2*DES_KEY_SIZE)) {
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
+	flags = crypto_ablkcipher_get_flags(tfm);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(tfm, flags);
+		return err;
 	}
 
 	memcpy(ctx->key, key, keylen);
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize = 2 * DES_KEY_SIZE,
+		.min_keysize = 3 * DES_KEY_SIZE,
 		.max_keysize = 3 * DES_KEY_SIZE,
 		.setkey = atmel_tdes_setkey,
 		.encrypt = atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
+		.min_keysize = 3*DES_KEY_SIZE,
 		.max_keysize = 3*DES_KEY_SIZE,
 		.ivsize = DES_BLOCK_SIZE,
 		.setkey = atmel_tdes_setkey,
@@ -1087,86 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
 		.decrypt = atmel_tdes_cbc_decrypt,
 	}
 },
-{
-	.cra_name = "cfb(des3_ede)",
-	.cra_driver_name = "atmel-cfb-tdes",
-	.cra_priority = 100,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = DES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask = 0x7,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
-		.max_keysize = 2*DES_KEY_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
-		.setkey = atmel_tdes_setkey,
-		.encrypt = atmel_tdes_cfb_encrypt,
-		.decrypt = atmel_tdes_cfb_decrypt,
-	}
-},
-{
-	.cra_name = "cfb8(des3_ede)",
-	.cra_driver_name = "atmel-cfb8-tdes",
-	.cra_priority = 100,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = CFB8_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
-		.max_keysize = 2*DES_KEY_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
-		.setkey = atmel_tdes_setkey,
-		.encrypt = atmel_tdes_cfb8_encrypt,
-		.decrypt = atmel_tdes_cfb8_decrypt,
-	}
-},
-{
-	.cra_name = "cfb16(des3_ede)",
-	.cra_driver_name = "atmel-cfb16-tdes",
-	.cra_priority = 100,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = CFB16_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask = 0x1,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
-		.max_keysize = 2*DES_KEY_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
-		.setkey = atmel_tdes_setkey,
-		.encrypt = atmel_tdes_cfb16_encrypt,
-		.decrypt = atmel_tdes_cfb16_decrypt,
-	}
-},
-{
-	.cra_name = "cfb32(des3_ede)",
-	.cra_driver_name = "atmel-cfb32-tdes",
-	.cra_priority = 100,
-	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = CFB32_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask = 0x3,
-	.cra_type = &crypto_ablkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
-		.max_keysize = 2*DES_KEY_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
-		.setkey = atmel_tdes_setkey,
-		.encrypt = atmel_tdes_cfb32_encrypt,
-		.decrypt = atmel_tdes_cfb32_decrypt,
-	}
-},
 {
 	.cra_name = "ofb(des3_ede)",
 	.cra_driver_name = "atmel-ofb-tdes",
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module = THIS_MODULE,
 	.cra_init = atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize = 2*DES_KEY_SIZE,
+		.min_keysize = 3*DES_KEY_SIZE,
 		.max_keysize = 3*DES_KEY_SIZE,
 		.ivsize = DES_BLOCK_SIZE,
 		.setkey = atmel_tdes_setkey,

@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
 		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
 
 		hdesc->tfm = tfm_ctx->child_hash;
-		hdesc->flags = crypto_ahash_get_flags(tfm) &
-			       CRYPTO_TFM_REQ_MAY_SLEEP;
 
 		tfm_ctx->hmac_key_length = blocksize;
 		ret = crypto_shash_digest(hdesc, key, keylen,

@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
 
 	if (keylen == (DES_KEY_SIZE * 3)) {
-		const u32 *K = (const u32 *)key;
-		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		u32 flags;
+		int ret;
 
-		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+		flags = crypto_ablkcipher_get_flags(cipher);
+		ret = __des3_verify_key(&flags, key);
+		if (unlikely(ret)) {
 			crypto_ablkcipher_set_flags(cipher, flags);
-			return -EINVAL;
+			return ret;
 		}
 
 		ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
 		goto err_hash;
 	}
 	ctx->shash->tfm = hash;
-	ctx->shash->flags = 0;
 
 	/* Set the key using data we already have from setkey */
 	if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)keys.enckey;
-			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+			u32 flags;
 
-			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+			flags = crypto_aead_get_flags(cipher);
+			ret = __des3_verify_key(&flags, keys.enckey);
+			if (unlikely(ret)) {
 				crypto_aead_set_flags(cipher, flags);
-				return -EINVAL;
+				return ret;
 			}
 
 			ctx->cipher_type = CIPHER_TYPE_3DES;

@@ -22,9 +22,6 @@
 #include "spum.h"
 #include "cipher.h"
 
-/* This array is based on the hash algo type supported in spu.h */
-char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
-
 char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
 	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
 
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
 		goto do_shash_err;
 	}
 	sdesc->shash.tfm = hash;
-	sdesc->shash.flags = 0x0;
 
 	if (key_len > 0) {
 		rc = crypto_shash_setkey(hash, key, key_len);

@@ -638,6 +638,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static int gcm_setkey(struct crypto_aead *aead,
 		      const u8 *key, unsigned int keylen)
 {
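The helper added above leans on __des3_verify_key(), which flags degenerate keys where K1 == K2 or K2 == K3 (3DES then collapses to single DES) whenever weak keys are forbidden, e.g. in FIPS mode. A rough usage sketch — the function is hypothetical and only illustrates the check:

	static int demo_degenerate_3des(struct crypto_aead *aead)
	{
		u8 key[DES3_EDE_KEY_SIZE];
		u32 flags = crypto_aead_get_flags(aead);

		memset(key, 0x42, 2 * DES_KEY_SIZE);		/* K1 == K2 */
		memset(key + 2 * DES_KEY_SIZE, 0x24, DES_KEY_SIZE);

		/* expected to fail (with the weak-key flag raised) when
		 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set or in FIPS mode */
		return __des3_verify_key(&flags, key);
	}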
@@ -2457,7 +2490,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2479,7 +2512,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2502,7 +2535,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2525,7 +2558,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2548,7 +2581,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2571,7 +2604,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2594,7 +2627,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2617,7 +2650,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2640,7 +2673,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2663,7 +2696,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2686,7 +2719,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2709,7 +2742,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -3460,7 +3493,7 @@ static int __init caam_algapi_init(void)
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
 	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
-	bool registered = false;
+	bool registered = false, gcm_support;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	if (!dev_node) {
@@ -3493,7 +3526,7 @@ static int __init caam_algapi_init(void)
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
 	 */
 	if (priv->era < 10) {
-		u32 cha_vid, cha_inst;
+		u32 cha_vid, cha_inst, aes_rn;
 
 		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
 		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3541,10 @@ static int __init caam_algapi_init(void)
 			    CHA_ID_LS_ARC4_SHIFT;
 		ccha_inst = 0;
 		ptha_inst = 0;
+
+		aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+			 CHA_ID_LS_AES_MASK;
+		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
 	} else {
 		u32 aesa, mdha;
 
@@ -3523,6 +3560,8 @@ static int __init caam_algapi_init(void)
 		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
 		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
 		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
 	}
 
 	/* If MD is present, limit digest size based on LP256 */
@@ -3595,11 +3634,9 @@ static int __init caam_algapi_init(void)
 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
 			continue;
 
-		/*
-		 * Check support for AES algorithms not available
-		 * on LP devices.
-		 */
-		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+		/* Skip GCM algorithms if not supported by device */
+		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
 			continue;
 
 		/*

@@ -292,6 +292,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +700,13 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return unlikely(des3_verify_key(skcipher, key)) ?:
+	       skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
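A note on the "?:" in des3_skcipher_setkey() above: this is the GNU C "Elvis" extension, where "a ?: b" means "a ? a : b" with "a" evaluated only once. It returns the verification error if one occurred, and otherwise the result of the real setkey — equivalent to this more verbose sketch:

	int err = des3_verify_key(skcipher, key);	/* 0 on success */

	if (err)
		return err;
	return skcipher_setkey(skcipher, key, keylen);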
@@ -1382,7 +1422,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "cbc-3des-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = des3_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1798,7 +1838,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1820,7 +1860,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1843,7 +1883,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1866,7 +1906,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1889,7 +1929,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1912,7 +1952,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1935,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1958,7 +1998,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1981,7 +2021,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2004,7 +2044,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2027,7 +2067,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2050,7 +2090,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,

@@ -323,6 +323,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					   bool encrypt)
 {
@@ -938,6 +971,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return unlikely(des3_verify_key(skcipher, key)) ?:
+	       skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
@@ -1484,7 +1524,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "cbc-3des-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = des3_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1916,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1938,7 +1978,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1961,7 +2001,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -1984,7 +2024,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2007,7 +2047,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2030,7 +2070,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2053,7 +2093,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2076,7 +2116,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2099,7 +2139,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2122,7 +2162,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2145,7 +2185,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2168,7 +2208,7 @@ static struct caam_aead_alg driver_aeads[] = {
 					   "cbc-des3_ede-caam-qi2",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 		},
-		.setkey = aead_setkey,
+		.setkey = des3_aead_setkey,
 		.setauthsize = aead_setauthsize,
 		.encrypt = aead_encrypt,
 		.decrypt = aead_decrypt,
@@ -2854,6 +2894,7 @@ struct caam_hash_state {
 	struct caam_request caam_req;
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
+	int ctx_dma_len;
 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
 				   struct caam_hash_state *state, int ctx_len,
 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
 {
+	state->ctx_dma_len = ctx_len;
 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
 	if (dma_mapping_error(dev, state->ctx_dma)) {
 		dev_err(dev, "unable to map ctx\n");
@@ -3018,13 +3060,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
 }
 
 /* Digest hash size if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			   u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+			   u32 digestsize)
 {
 	struct caam_request *req_ctx;
 	u32 *desc;
 	struct split_key_sh_result result;
-	dma_addr_t src_dma, dst_dma;
+	dma_addr_t key_dma;
 	struct caam_flc *flc;
 	dma_addr_t flc_dma;
 	int ret = -ENOMEM;
@@ -3041,17 +3083,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	if (!flc)
 		goto err_flc;
 
-	src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->dev, src_dma)) {
-		dev_err(ctx->dev, "unable to map key input memory\n");
-		goto err_src_dma;
-	}
-	dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
-				 DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, dst_dma)) {
-		dev_err(ctx->dev, "unable to map key output memory\n");
-		goto err_dst_dma;
+	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ctx->dev, key_dma)) {
+		dev_err(ctx->dev, "unable to map key memory\n");
+		goto err_key_dma;
 	}
 
 	desc = flc->sh_desc;
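The replacement maps a single buffer DMA_BIDIRECTIONAL because the key is now hashed in place: the device reads the key from, and writes the digest back to, the same memory, so the separate TO_DEVICE source and FROM_DEVICE destination mappings collapse into one. In outline (a sketch only, with dev/buf/len standing in for the driver's actual arguments):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* point both the input and output frame-list entries at "handle",
	 * run the job, then release the mapping */
	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);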
@@ -3076,14 +3111,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(in_fle, src_dma);
+	dpaa2_fl_set_addr(in_fle, key_dma);
 	dpaa2_fl_set_len(in_fle, *keylen);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, dst_dma);
+	dpaa2_fl_set_addr(out_fle, key_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
 			     1);
@@ -3103,17 +3138,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 		wait_for_completion(&result.completion);
 		ret = result.err;
 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+				     DUMP_PREFIX_ADDRESS, 16, 4, key,
 				     digestsize, 1);
 	}
 
 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
 			 DMA_TO_DEVICE);
 err_flc_dma:
-	dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
-	dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
 	kfree(flc);
 err_flc:
 	kfree(req_ctx);
@@ -3135,12 +3168,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
 
 	if (keylen > blocksize) {
-		hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
-					   GFP_KERNEL | GFP_DMA);
+		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 		if (!hashed_key)
 			return -ENOMEM;
-		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
-				      digestsize);
+		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 		if (ret)
 			goto bad_free_key;
 		key = hashed_key;
@@ -3165,14 +3196,12 @@ bad_free_key:
 }
 
 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
-			       struct ahash_request *req, int dst_len)
+			       struct ahash_request *req)
 {
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (edesc->src_nents)
 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-	if (edesc->dst_dma)
-		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
 	if (edesc->qm_sg_bytes)
 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3216,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 
 static inline void ahash_unmap_ctx(struct device *dev,
 				   struct ahash_edesc *edesc,
-				   struct ahash_request *req, int dst_len,
-				   u32 flag)
+				   struct ahash_request *req, u32 flag)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (state->ctx_dma) {
-		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
 		state->ctx_dma = 0;
 	}
-	ahash_unmap(dev, edesc, req, dst_len);
+	ahash_unmap(dev, edesc, req);
 }
 
 static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3245,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3250,7 +3273,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3283,16 +3306,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3314,7 +3334,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3452,7 +3472,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3484,7 +3504,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3503,22 +3523,13 @@ static int ahash_final_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3544,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	return ret;
 
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3586,7 +3597,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3605,22 +3616,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3637,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	return ret;
 
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3704,18 +3706,19 @@ static int ahash_digest(struct ahash_request *req)
 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3732,7 @@ static int ahash_digest(struct ahash_request *req)
 	return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3755,27 +3758,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	if (!edesc)
 		return ret;
 
-	state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->dev, state->buf_dma)) {
-		dev_err(ctx->dev, "unable to map src\n");
-		goto unmap;
+	if (buflen) {
+		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+			dev_err(ctx->dev, "unable to map src\n");
+			goto unmap;
+		}
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
-	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(in_fle, state->buf_dma);
-	dpaa2_fl_set_len(in_fle, buflen);
+	/*
+	 * crypto engine requires the input entry to be present when
+	 * "frame list" FD is used.
+	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
+	 * in_fle zeroized (except for "Final" flag) is the best option.
+	 */
+	if (buflen) {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(in_fle, state->buf_dma);
+		dpaa2_fl_set_len(in_fle, buflen);
+	}
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3805,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3870,6 +3885,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
+	state->ctx_dma_len = ctx->ctx_len;
 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 					ctx->ctx_len, DMA_FROM_DEVICE);
 	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3934,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3983,11 +3999,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		ret = -ENOMEM;
 		goto unmap;
 	}
@@ -3998,7 +4015,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4030,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return -ENOMEM;
 }
@@ -4100,6 +4117,7 @@ static int ahash_update_first(struct ahash_request *req)
 		scatterwalk_map_and_copy(next_buf, req->src, to_hash,
 					 *next_buflen, 0);
 
+		state->ctx_dma_len = ctx->ctx_len;
 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 						ctx->ctx_len, DMA_FROM_DEVICE);
 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4161,7 @@ static int ahash_update_first(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -4162,6 +4180,7 @@ static int ahash_init(struct ahash_request *req)
 	state->final = ahash_final_no_ctx;
 
 	state->ctx_dma = 0;
+	state->ctx_dma_len = 0;
 	state->current_buf = 0;
 	state->buf_dma = 0;
 	state->buflen_0 = 0;

@@ -162,14 +162,12 @@ struct skcipher_edesc {
 
 /*
  * ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @qm_sg_bytes: length of dma mapped qm_sg space
  * @sgt: pointer to h/w link table
  */
 struct ahash_edesc {
-	dma_addr_t dst_dma;
 	dma_addr_t qm_sg_dma;
 	int src_nents;
 	int qm_sg_bytes;

@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
 static struct akcipher_alg caam_rsa = {
 	.encrypt = caam_rsa_enc,
 	.decrypt = caam_rsa_dec,
-	.sign = caam_rsa_dec,
-	.verify = caam_rsa_enc,
 	.set_pub_key = caam_rsa_set_pub_key,
 	.set_priv_key = caam_rsa_set_priv_key,
 	.max_size = caam_rsa_max_size,

@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
|
||||
return caam_get_era_from_hw(ctrl);
|
||||
}
|
||||
|
||||
/*
|
||||
* ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
|
||||
* have an issue wherein AXI bus transactions may not occur in the correct
|
||||
* order. This isn't a problem running single descriptors, but can be if
|
||||
* running multiple concurrent descriptors. Reworking the driver to throttle
|
||||
* to single requests is impractical, thus the workaround is to limit the AXI
|
||||
* pipeline to a depth of 1 (from it's default of 4) to preclude this situation
|
||||
* from occurring.
|
||||
*/
|
||||
static void handle_imx6_err005766(u32 *mcr)
|
||||
{
|
||||
if (of_machine_is_compatible("fsl,imx6q") ||
|
||||
of_machine_is_compatible("fsl,imx6dl") ||
|
||||
of_machine_is_compatible("fsl,imx6qp"))
|
||||
clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
|
||||
1 << MCFGR_AXIPIPE_SHIFT);
|
||||
}
|
||||
|
||||
static const struct of_device_id caam_match[] = {
|
||||
{
|
||||
.compatible = "fsl,sec-v4.0",
|
||||
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
|
||||
(sizeof(dma_addr_t) == sizeof(u64) ?
|
||||
MCFGR_LONG_PTR : 0));
|
||||
|
||||
handle_imx6_err005766(&ctrl->mcr);
|
||||
|
||||
/*
|
||||
* Read the Compile Time paramters and SCFGR to determine
|
||||
* if Virtualization is enabled for this platform
|
||||
|
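
handle_imx6_err005766() above is, at heart, a compatible-string-gated read-modify-write of one register field. A hedged sketch of the same shape using generic MMIO accessors (the driver itself goes through its endian-aware clrsetbits_32() helper and operates on the mapped MCFGR):

#include <linux/io.h>
#include <linux/of.h>

static void limit_axi_pipeline(void __iomem *mcr, u32 pipe_mask, u32 pipe_shift)
{
	u32 v;

	/* only the affected imx6 variants need the cap */
	if (!of_machine_is_compatible("fsl,imx6q") &&
	    !of_machine_is_compatible("fsl,imx6dl") &&
	    !of_machine_is_compatible("fsl,imx6qp"))
		return;

	v = ioread32(mcr);
	v &= ~pipe_mask;		/* clear the AXIPIPE field */
	v |= 1 << pipe_shift;		/* depth 1 instead of the default 4 */
	iowrite32(v, mcr);
}
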
@@ -138,7 +138,7 @@ static const struct {
{ 0x46, "Annotation length exceeds offset (reuse mode)"},
{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
{ 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
{ 0x51, "Unsupported IF reuse mode"},
{ 0x52, "Unsupported FL use mode"},
{ 0x53, "Unsupported RJD use mode"},

@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
atomic_t tfm_count ____cacheline_aligned;

/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
int inp_ring_write_index; /* Input index "tail" */
u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
u32 outring_used = 0;

while (rd_reg32(&jrp->rregs->outring_used)) {
while (outring_used ||
(outring_used = rd_reg32(&jrp->rregs->outring_used))) {

head = READ_ONCE(jrp->head);

spin_lock(&jrp->outlock);

sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;

@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;

/* Stash callback params for use outside of lock */
/* Stash callback params */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -213,7 +213,7 @@ static void caam_jr_dequeue(unsigned long devarg)
mb();

/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);

jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
(JOBR_DEPTH - 1);
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}

spin_unlock(&jrp->outlock);

/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
outring_used--;
}

/* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head = jrp->head;
tail = READ_ONCE(jrp->tail);

if (!rd_reg32(&jrp->rregs->inpring_avail) ||
if (!jrp->inpring_avail ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;

jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
jrp->inpring[head] = cpu_to_caam_dma(desc_dma);

/*
* Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
*/
smp_wmb();

jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);

/*
* Ensure that all job information has been written before
* notifying CAAM that a new job was added to the input ring.
* notifying CAAM that a new job was added to the input ring
* using a memory barrier. wr_reg32() uses the iowrite32() API
* to do the register write, and iowrite32() issues a memory
* barrier before the write operation.
*/
wmb();

wr_reg32(&jrp->rregs->inpring_jobadd, 1);

jrp->inpring_avail--;
if (!jrp->inpring_avail)
jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

spin_unlock_bh(&jrp->inplock);

return 0;
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;

/* Setup rings */
jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

jrp->ringsize = JOBR_DEPTH;
jrp->inpring_avail = JOBR_DEPTH;

spin_lock_init(&jrp->inplock);
spin_lock_init(&jrp->outlock);

/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
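
The dequeue rework above trades one MMIO read per completed job for roughly one per burst: the occupancy register is re-read only when the cached count runs out. Reduced to its essentials (names are illustrative, not the driver's):

extern void handle_one_completion(void);	/* hypothetical per-job work */

static void drain_completions(void __iomem *occupancy_reg)
{
	u32 used = 0;

	/* re-read the slow register only when the cached count is exhausted */
	while (used || (used = ioread32(occupancy_reg))) {
		handle_one_completion();
		used--;
	}
}
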

@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
if (IS_ERR_OR_NULL(new_fq)) {
if (IS_ERR(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
if (IS_ERR(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);

@@ -96,6 +96,14 @@ cpu_to_caam(16)
cpu_to_caam(32)
cpu_to_caam(64)

static inline void wr_reg32_relaxed(void __iomem *reg, u32 data)
{
if (caam_little_end)
writel_relaxed(data, reg);
else
writel_relaxed(cpu_to_be32(data), reg);
}

static inline void wr_reg32(void __iomem *reg, u32 data)
{
if (caam_little_end)
@@ -253,6 +261,9 @@ struct version_regs {
#define CHA_VER_VID_SHIFT 24
#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)

/* CHA Miscellaneous Information - AESA_MISC specific */
#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)

/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
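
wr_reg32_relaxed() exists so the dequeue path can acknowledge completions without paying for iowrite32()'s implicit barrier — the mb() earlier in the loop has already ordered the status read. The enqueue side keeps the fully ordered write, because there the barrier is exactly what publishes the new job to the device. The two idioms side by side, as a generic sketch:

static void ack_then_ring(void __iomem *ack_reg, void __iomem *doorbell_reg)
{
	mb();				/* order the status read before the ack */
	writel_relaxed(1, ack_reg);	/* relaxed is safe: ordering done above */

	wmb();				/* publish descriptor writes first */
	writel(1, doorbell_reg);	/* keep the ordered write for the doorbell */
}
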
@@ -10,7 +10,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
u32 flags = crypto_ablkcipher_get_flags(cipher);
int err;

err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

return cvm_setkey(cipher, key, keylen, DES3_CBC);
}

static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
u32 flags = crypto_ablkcipher_get_flags(cipher);
int err;

err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

return cvm_setkey(cipher, key, keylen, DES3_ECB);
}

static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
sizeof(struct ablkcipher_request);
/* Additional memory for ablkcipher_request is
* allocated since the cryptd daemon uses
* this memory for request_ctx information
*/

tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
return 0;
}

@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}

void cptvf_device_init(struct cpt_vf *cptvf)
static void cptvf_device_init(struct cpt_vf *cptvf)
{
u64 base_addr = 0;
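
Both cvm setkey wrappers above follow the same recipe, which several drivers in this series adopt for the new 3DES policy: run __des3_verify_key() first, push any flag updates (such as the weak-key indication) back to the tfm on failure, and only then call the real setkey. The generic shape, with a hypothetical backend:

#include <crypto/des.h>

static int backend_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  u32 keylen);	/* hypothetical driver-specific setkey */

static int example_des3_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key, u32 keylen)
{
	u32 flags = crypto_ablkcipher_get_flags(cipher);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);	/* report why */
		return err;
	}
	return backend_setkey(cipher, key, keylen);
}
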
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
mbx->data);
}

/* ACKs PF's mailbox message
*/
void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
{
mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
cptvf_send_msg_to_pf(cptvf, mbx);
}

/* NACKs PF's mailbox message that VF is not able to
* complete the action
*/
void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
{
mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
cptvf_send_msg_to_pf(cptvf, mbx);
}

/* Interrupt handler to handle mailbox messages from VFs */
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
{

@@ -223,7 +223,7 @@ scatter_gather_clean:
return ret;
}

int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
u32 qno)
{
struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
return ret;
}

void do_request_cleanup(struct cpt_vf *cptvf,
static void do_request_cleanup(struct cpt_vf *cptvf,
struct cpt_info_buffer *info)
{
int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
kzfree(info);
}

void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
struct pci_dev *pdev = cptvf->pdev;
@@ -18,26 +18,6 @@

#define GCM_AES_SALT_SIZE 4

/**
* struct nitrox_crypt_params - Params to set nitrox crypto request.
* @cryptlen: Encryption/Decryption data length
* @authlen: Assoc data length + Cryptlen
* @srclen: Input buffer length
* @dstlen: Output buffer length
* @iv: IV data
* @ivsize: IV data length
* @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
*/
struct nitrox_crypt_params {
unsigned int cryptlen;
unsigned int authlen;
unsigned int srclen;
unsigned int dstlen;
u8 *iv;
int ivsize;
u8 ctrl_arg;
};

union gph_p3 {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}

static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
struct scatterlist *src, char *iv, int ivsize,
int buflen)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
int nents = sg_nents_for_len(areq->src, buflen) + 1;
int nents = sg_nents_for_len(src, buflen);
int ret;

if (nents < 0)
return nents;

/* IV entry */
nents += 1;
/* Allocate buffer to hold IV and input scatterlist array */
ret = alloc_src_req_buf(nkreq, nents, ivsize);
if (ret)
return ret;

nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);

return 0;
}

static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
struct scatterlist *dst, int ivsize, int buflen)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
int nents = sg_nents_for_len(areq->dst, buflen) + 3;
int nents = sg_nents_for_len(dst, buflen);
int ret;

if (nents < 0)
return nents;

/* IV, ORH, COMPLETION entries */
nents += 3;
/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
* array
*/
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)

nitrox_creq_set_orh(nkreq);
nitrox_creq_set_comp(nkreq);
nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);

return 0;
}

static void free_src_sglist(struct aead_request *areq)
static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);

kfree(nkreq->src);
}

static void free_dst_sglist(struct aead_request *areq)
static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);

kfree(nkreq->dst);
}

static int nitrox_set_creq(struct aead_request *areq,
struct nitrox_crypt_params *params)
static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
{
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct se_crypto_request *creq = &rctx->nkreq.creq;
union gph_p3 param3;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
int ret;

creq->flags = areq->base.flags;
creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
creq->flags = rctx->flags;
creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;

creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
creq->ctrl.s.arg = params->ctrl_arg;
creq->ctrl.s.arg = rctx->ctrl_arg;

creq->gph.param0 = cpu_to_be16(params->cryptlen);
creq->gph.param1 = cpu_to_be16(params->authlen);
creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
param3.iv_offset = 0;
param3.auth_offset = params->ivsize;
param3.auth_offset = rctx->ivsize;
creq->gph.param3 = cpu_to_be16(param3.param);

creq->ctx_handle = nctx->u.ctx_handle;
creq->ctx_handle = rctx->ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);

ret = alloc_src_sglist(areq, params->iv, params->ivsize,
params->srclen);
ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
rctx->srclen);
if (ret)
return ret;

ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
rctx->dstlen);
if (ret) {
free_src_sglist(areq);
free_src_sglist(&rctx->nkreq);
return ret;
}

@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
static void nitrox_aead_callback(void *arg, int err)
{
struct aead_request *areq = arg;
struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);

free_src_sglist(areq);
free_dst_sglist(areq);
free_src_sglist(&rctx->nkreq);
free_dst_sglist(&rctx->nkreq);
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
struct nitrox_crypt_params params;
int ret;

memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);

memset(&params, 0, sizeof(params));
params.cryptlen = areq->cryptlen;
params.authlen = areq->assoclen + params.cryptlen;
params.srclen = params.authlen;
params.dstlen = params.srclen + aead->authsize;
params.iv = &areq->iv[GCM_AES_SALT_SIZE];
params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
params.ctrl_arg = ENCRYPT;
ret = nitrox_set_creq(areq, &params);
rctx->cryptlen = areq->cryptlen;
rctx->assoclen = areq->assoclen;
rctx->srclen = areq->assoclen + areq->cryptlen;
rctx->dstlen = rctx->srclen + aead->authsize;
rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
rctx->flags = areq->base.flags;
rctx->ctx_handle = nctx->u.ctx_handle;
rctx->src = areq->src;
rctx->dst = areq->dst;
rctx->ctrl_arg = ENCRYPT;
ret = nitrox_set_creq(rctx);
if (ret)
return ret;

@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
struct se_crypto_request *creq = &nkreq->creq;
struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
struct nitrox_crypt_params params;
int ret;

memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);

memset(&params, 0, sizeof(params));
params.cryptlen = areq->cryptlen - aead->authsize;
params.authlen = areq->assoclen + params.cryptlen;
params.srclen = areq->cryptlen + areq->assoclen;
params.dstlen = params.srclen - aead->authsize;
params.iv = &areq->iv[GCM_AES_SALT_SIZE];
params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
params.ctrl_arg = DECRYPT;
ret = nitrox_set_creq(areq, &params);
rctx->cryptlen = areq->cryptlen - aead->authsize;
rctx->assoclen = areq->assoclen;
rctx->srclen = areq->cryptlen + areq->assoclen;
rctx->dstlen = rctx->srclen - aead->authsize;
rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
rctx->flags = areq->base.flags;
rctx->ctx_handle = nctx->u.ctx_handle;
rctx->src = areq->src;
rctx->dst = areq->dst;
rctx->ctrl_arg = DECRYPT;
ret = nitrox_set_creq(rctx);
if (ret)
return ret;

@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
return 0;
}

static int nitrox_aes_gcm_init(struct crypto_aead *aead)
static int nitrox_gcm_common_init(struct crypto_aead *aead)
{
int ret;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
flags->w0.auth_input_type = 1;
flags->f = be64_to_cpu(flags->f);

crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
sizeof(struct nitrox_kcrypt_request));
return 0;
}

static int nitrox_aes_gcm_init(struct crypto_aead *aead)
{
int ret;

ret = nitrox_gcm_common_init(aead);
if (ret)
return ret;

crypto_aead_set_reqsize(aead,
sizeof(struct aead_request) +
sizeof(struct nitrox_aead_rctx));

return 0;
}
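
Splitting nitrox_gcm_common_init() out lets each algorithm declare its own per-request context size; the kcrypt request now lives inside the rctx instead of being the reqsize itself. The idiom, reduced to its core:

static int example_aead_init(struct crypto_aead *aead)
{
	/* per-request memory = base aead_request + this algorithm's rctx */
	crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
				      sizeof(struct nitrox_aead_rctx));
	return 0;
}
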
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
nctx->ndev = NULL;
}

static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct flexi_crypto_context *fctx = nctx->u.fctx;
int ret;

if (keylen < GCM_AES_SALT_SIZE)
return -EINVAL;

keylen -= GCM_AES_SALT_SIZE;
ret = nitrox_aes_gcm_setkey(aead, key, keylen);
if (ret)
return ret;

memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
return 0;
}

static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}

return nitrox_aead_setauthsize(aead, authsize);
}

static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
{
struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
struct scatterlist *sg;

if (areq->assoclen != 16 && areq->assoclen != 20)
return -EINVAL;

scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
sg_init_table(rctx->src, 3);
sg_set_buf(rctx->src, rctx->assoc, assoclen);
sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
if (sg != rctx->src + 1)
sg_chain(rctx->src, 2, sg);

if (areq->src != areq->dst) {
sg_init_table(rctx->dst, 3);
sg_set_buf(rctx->dst, rctx->assoc, assoclen);
sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
if (sg != rctx->dst + 1)
sg_chain(rctx->dst, 2, sg);
}

aead_rctx->src = rctx->src;
aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;

return 0;
}

static void nitrox_rfc4106_callback(void *arg, int err)
{
struct aead_request *areq = arg;
struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;

free_src_sglist(nkreq);
free_dst_sglist(nkreq);
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
}

areq->base.complete(&areq->base, err);
}

static int nitrox_rfc4106_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
int ret;

aead_rctx->cryptlen = areq->cryptlen;
aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
aead_rctx->iv = areq->iv;
aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
aead_rctx->flags = areq->base.flags;
aead_rctx->ctx_handle = nctx->u.ctx_handle;
aead_rctx->ctrl_arg = ENCRYPT;

ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
if (ret)
return ret;

ret = nitrox_set_creq(aead_rctx);
if (ret)
return ret;

/* send the crypto request */
return nitrox_process_se_request(nctx->ndev, creq,
nitrox_rfc4106_callback, areq);
}

static int nitrox_rfc4106_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
struct nitrox_aead_rctx *aead_rctx = &rctx->base;
struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
int ret;

aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
aead_rctx->srclen =
areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
aead_rctx->iv = areq->iv;
aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
aead_rctx->flags = areq->base.flags;
aead_rctx->ctx_handle = nctx->u.ctx_handle;
aead_rctx->ctrl_arg = DECRYPT;

ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
if (ret)
return ret;

ret = nitrox_set_creq(aead_rctx);
if (ret)
return ret;

/* send the crypto request */
return nitrox_process_se_request(nctx->ndev, creq,
nitrox_rfc4106_callback, areq);
}

static int nitrox_rfc4106_init(struct crypto_aead *aead)
{
int ret;

ret = nitrox_gcm_common_init(aead);
if (ret)
return ret;

crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
sizeof(struct nitrox_rfc4106_rctx));

return 0;
}

static struct aead_alg nitrox_aeads[] = { {
.base = {
.cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
.exit = nitrox_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
}, {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.setkey = nitrox_rfc4106_setkey,
.setauthsize = nitrox_rfc4106_setauthsize,
.encrypt = nitrox_rfc4106_enc,
.decrypt = nitrox_rfc4106_dec,
.init = nitrox_rfc4106_init,
.exit = nitrox_aead_exit,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
} };

int nitrox_register_aeads(void)
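
The rfc4106 path cannot hand the engine the caller's layout, because the last 8 AAD bytes are really the IV. nitrox_rfc4106_set_aead_rctx_sglist() therefore bounces the true AAD into rctx->assoc and splices it back in front of the payload with a three-entry sg table. The core of that trick, as a sketch:

#include <crypto/scatterwalk.h>

static void splice_aad(struct scatterlist tbl[3], u8 *assoc_copy,
		       unsigned int assoclen, struct scatterlist *orig,
		       unsigned int orig_assoclen)
{
	struct scatterlist *sg;

	sg_init_table(tbl, 3);
	sg_set_buf(tbl, assoc_copy, assoclen);	/* entry 0: AAD minus the IV */
	/* skip the full, IV-bearing AAD in the caller's list ... */
	sg = scatterwalk_ffwd(tbl + 1, orig, orig_assoclen);
	if (sg != tbl + 1)
		sg_chain(tbl, 2, sg);		/* ... and chain to the payload */
}
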
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}

static const char *get_core_option(u8 se_cores, u8 ae_cores)
{
const char *option = "";

if (ae_cores == AE_MAX_CORES) {
switch (se_cores) {
case SE_MAX_CORES:
option = "60";
break;
case 40:
option = "60s";
break;
}
} else if (ae_cores == (AE_MAX_CORES / 2)) {
option = "30";
} else {
option = "60i";
}

return option;
}

static const char *get_feature_option(u8 zip_cores, int core_freq)
{
if (zip_cores == 0)
return "";
else if (zip_cores < ZIP_MAX_CORES)
return "-C15";

if (core_freq >= 850)
return "-C45";
else if (core_freq >= 750)
return "-C35";
else if (core_freq >= 550)
return "-C25";

return "";
}

void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
}

/* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
if (ndev->hw.ae_cores == AE_MAX_CORES) {
switch (ndev->hw.se_cores) {
case SE_MAX_CORES:
i = snprintf(name, sizeof(name), "CNN5560");
break;
case 40:
i = snprintf(name, sizeof(name), "CNN5560s");
break;
}
} else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
i = snprintf(name, sizeof(name), "CNN5530");
} else {
i = snprintf(name, sizeof(name), "CNN5560i");
}

snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
ndev->hw.freq, ndev->hw.revision_id);
/* determine the partname
* CNN55<core option>-<freq><pincount>-<feature option>-<rev>
*/
snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
ndev->hw.freq,
get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
ndev->hw.revision_id);

/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
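
With the two lookup helpers, the partname becomes one format call. For instance, a part reporting the "60s" core option at 850 MHz with all ZIP cores alive and revision 1 would come out roughly as "CNN5560s-850BG676-C45-1.1" (example values only; the %3d keeps the old code's field width). A hedged usage sketch:

static void build_partname(char *name, size_t len, u8 se_cores, u8 ae_cores,
			   u8 zip_cores, int freq, unsigned int rev)
{
	snprintf(name, len, "CNN55%s-%3dBG676%s-1.%u",
		 get_core_option(se_cores, ae_cores), freq,
		 get_feature_option(zip_cores, freq), rev);
}
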
@@ -211,6 +211,50 @@ struct nitrox_kcrypt_request {
u8 *dst;
};

/**
* struct nitrox_aead_rctx - AEAD request context
* @nkreq: Base request context
* @cryptlen: Encryption/Decryption data length
* @assoclen: AAD length
* @srclen: Input buffer length
* @dstlen: Output buffer length
* @iv: IV data
* @ivsize: IV data length
* @flags: AEAD req flags
* @ctx_handle: Device context handle
* @src: Source sglist
* @dst: Destination sglist
* @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
*/
struct nitrox_aead_rctx {
struct nitrox_kcrypt_request nkreq;
unsigned int cryptlen;
unsigned int assoclen;
unsigned int srclen;
unsigned int dstlen;
u8 *iv;
int ivsize;
u32 flags;
u64 ctx_handle;
struct scatterlist *src;
struct scatterlist *dst;
u8 ctrl_arg;
};

/**
* struct nitrox_rfc4106_rctx - rfc4106 cipher request context
* @base: AEAD request context
* @src: Source sglist
* @dst: Destination sglist
* @assoc: AAD
*/
struct nitrox_rfc4106_rctx {
struct nitrox_aead_rctx base;
struct scatterlist src[3];
struct scatterlist dst[3];
u8 assoc[20];
};

/**
* struct pkt_instr_hdr - Packet Instruction Header
* @g: Gather used
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
struct scatterlist *sg = to_sg;
unsigned int sglen;

for (; buflen; buflen -= sglen) {
for (; buflen && from_sg; buflen -= sglen) {
sglen = from_sg->length;
if (sglen > buflen)
sglen = buflen;
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen != DES3_EDE_KEY_SIZE) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

return nitrox_skcipher_setkey(cipher, 0, key, keylen);
return unlikely(des3_verify_key(cipher, key)) ?:
nitrox_skcipher_setkey(cipher, 0, key, keylen);
}

static int nitrox_3des_encrypt(struct skcipher_request *skreq)
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
zip_ops->csum = 1; /* Adler checksum desired */
}

int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
return -ENOMEM;
}

void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}

int zip_compress(const u8 *src, unsigned int slen,
static int zip_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
return ret;
}

int zip_decompress(const u8 *src, unsigned int slen,
static int zip_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
u32 *flags = &tfm->base.crt_flags;
int err;

/* From des_generic.c:
*
* RFC2451:
* If the first two or last two independent 64-bit keys are
* equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*/
const u32 *K = (const u32 *)key;

if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
(*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
err = __des3_verify_key(flags, key);
if (unlikely(err))
return err;

/* It's not clear that there is any support for a keysize of 112.
* If needed, the caller should make K1 == K3
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
if (buf[nskip])
break;
*kplen = sz - nskip;
*kpbuf = kzalloc(*kplen, GFP_KERNEL);
*kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
if (!*kpbuf)
return -ENOMEM;
memcpy(*kpbuf, buf + nskip, *kplen);

return 0;
}
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg ccp_rsa_defaults = {
.encrypt = ccp_rsa_encrypt,
.decrypt = ccp_rsa_decrypt,
.sign = ccp_rsa_decrypt,
.verify = ccp_rsa_encrypt,
.set_pub_key = ccp_rsa_setpubkey,
.set_priv_key = ccp_rsa_setprivkey,
.max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
}
};

int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
static int ccp_register_rsa_alg(struct list_head *head,
const struct ccp_rsa_def *def)
{
struct ccp_crypto_akcipher_alg *ccp_alg;
struct akcipher_alg *alg;
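
The keypart copy above is the stock kzalloc()+memcpy() to kmemdup() conversion: zeroing the buffer first is wasted work when every byte is overwritten immediately. In isolation:

static u8 *dup_keypart(const u8 *src, size_t len)
{
	/* one call replaces allocate-zeroed + copy, with identical semantics
	 * for a buffer that is fully initialized from src */
	return kmemdup(src, len, GFP_KERNEL);
}
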
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sdesc->tfm = shash;
sdesc->flags = crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MAY_SLEEP;

ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
@@ -583,6 +583,69 @@ e_free:
return ret;
}

static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
struct sev_user_data_get_id2 input;
struct sev_data_get_id *data;
void *id_blob = NULL;
int ret;

/* SEV GET_ID is available from SEV API v0.16 and up */
if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
return -ENOTSUPP;

if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;

/* Check if we have write access to the userspace buffer */
if (input.address &&
input.length &&
!access_ok(input.address, input.length))
return -EFAULT;

data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;

if (input.address && input.length) {
id_blob = kmalloc(input.length, GFP_KERNEL);
if (!id_blob) {
kfree(data);
return -ENOMEM;
}

data->address = __psp_pa(id_blob);
data->len = input.length;
}

ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);

/*
* Firmware will return the length of the ID value (either the minimum
* required length or the actual length written), return it to the user.
*/
input.length = data->len;

if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
ret = -EFAULT;
goto e_free;
}

if (id_blob) {
if (copy_to_user((void __user *)input.address,
id_blob, data->len)) {
ret = -EFAULT;
goto e_free;
}
}

e_free:
kfree(id_blob);
kfree(data);

return ret;
}

static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
ret = sev_ioctl_do_pdh_export(&input);
break;
case SEV_GET_ID:
pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
ret = sev_ioctl_do_get_id(&input);
break;
case SEV_GET_ID2:
ret = sev_ioctl_do_get_id2(&input);
break;
default:
ret = -EINVAL;
goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
rc = sev_platform_init(&error);
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
goto err;
return;
}

dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
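
Because the firmware echoes back the required length, SEV_GET_ID2 supports a two-call protocol from userspace: probe with a zero-length buffer, then allocate and fetch. A hedged userspace sketch against the uapi above (error handling abbreviated):

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

int fetch_sev_id(void **blob, unsigned int *len)
{
	struct sev_user_data_get_id2 id2;
	struct sev_issue_cmd cmd;
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&id2, 0, sizeof(id2));	/* pass 1: length query */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = SEV_GET_ID2;
	cmd.data = (unsigned long)&id2;
	ioctl(fd, SEV_ISSUE_CMD, &cmd);	/* firmware fills in id2.length */

	*blob = malloc(id2.length);	/* pass 2: fetch the ID itself */
	id2.address = (unsigned long)*blob;
	ioctl(fd, SEV_ISSUE_CMD, &cmd);

	*len = id2.length;
	close(fd);
	return 0;
}
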
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2012-2019 ARM Limited (or its affiliates).

obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

/* BACKLOG notification */
if (err == -EINPROGRESS)
goto done;

cc_unmap_aead_request(dev, areq);

/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can pass to the hmac processing
* (copy to internal buffer or hash in case of key longer than block
*/
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}

if (keylen != 0) {

key = kmemdup(authkey, keylen, GFP_KERNEL);
if (!key)
return -ENOMEM;

key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

kzfree(key);

return rc;
}
@@ -650,6 +659,39 @@ setkey_error:
return rc;
}

static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_keys keys;
u32 flags;
int err;

err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
goto badkey;

err = -EINVAL;
if (keys.enckeylen != DES3_EDE_KEY_SIZE)
goto badkey;

flags = crypto_aead_get_flags(aead);
err = __des3_verify_key(&flags, keys.enckey);
if (unlikely(err)) {
crypto_aead_set_flags(aead, flags);
goto out;
}

err = cc_aead_setkey(aead, key, keylen);

out:
memzero_explicit(&keys, sizeof(keys));
return err;

badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
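
cc_des3_aead_setkey() above reduces to: split the authenc() blob, length-check the cipher key, vet it for the degenerate 3DES patterns, and scrub the split copy on every exit. The skeleton, as a hedged standalone sketch:

#include <linux/string.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>

static int example_authenc_des3_check(struct crypto_aead *aead,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags = crypto_aead_get_flags(aead);
	int err = crypto_authenc_extractkeys(&keys, key, keylen);

	if (!err && keys.enckeylen != DES3_EDE_KEY_SIZE)
		err = -EINVAL;
	if (!err)
		err = __des3_verify_key(&flags, keys.enckey);

	memzero_explicit(&keys, sizeof(keys));	/* scrub regardless of outcome */
	return err;
}
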

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
areq->assoclen, NS_BIT);
areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;

/* Hash associated data */
if (req->assoclen > 0)
if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);

if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
!req_ctx->is_single_pass) {
!req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int assoclen = req->assoclen;
unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;

@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;

/* process assoc data */
if (req->assoclen > 0) {
if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
if (req->assoclen > 0)
if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */

rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */

/* l(a) - size of associated data. */
req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);

memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= CCM_BLOCK_IV_SIZE;
areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}

static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
if (req->assoclen > 0)
if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);

dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
__func__, cryptlen, req->assoclen, ctx->authsize);
dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
__func__, cryptlen, req_ctx->assoclen, ctx->authsize);

memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);

@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;

temp64 = cpu_to_be64(req->assoclen * 8);
temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;

temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
cryptlen) * 8);
temp64 = cpu_to_be64((req_ctx->assoclen +
GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}

static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;

@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;

@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;

@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;

areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;

areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;

memset(areq_ctx, 0, sizeof(*areq_ctx));

//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;

cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}

memset(areq_ctx, 0, sizeof(*areq_ctx));

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;

areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;

memset(areq_ctx, 0, sizeof(*areq_ctx));

//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;

/* No generated IV required */
areq_ctx->backup_iv = req->iv;
areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;

cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
.setkey = cc_aead_setkey,
.setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
.setkey = cc_aead_setkey,
.setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
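
All of the assoclen hunks above implement one idea: never shrink req->assoclen in place (that mutates the caller's request); copy it into the per-request context at entry and adjust only the copy. In miniature:

static void example_init_assoclen(struct aead_request *req,
				  struct aead_req_ctx *areq_ctx, bool rfc4309)
{
	memset(areq_ctx, 0, sizeof(*areq_ctx));
	areq_ctx->assoclen = req->assoclen;	/* private working copy */
	if (rfc4309)				/* strip the IV carried in the AAD */
		areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
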
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
@@ -1,5 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
|
||||
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/authenc.h>
|
||||
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
|
||||
{
|
||||
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
u32 skip = req->assoclen + req->cryptlen;
|
||||
u32 skip = areq_ctx->assoclen + req->cryptlen;
|
||||
|
||||
if (areq_ctx->is_gcm4543)
|
||||
skip += crypto_aead_ivsize(tfm);
|
||||
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
|
||||
*/
|
||||
static unsigned int cc_get_sgl_nents(struct device *dev,
|
||||
struct scatterlist *sg_list,
|
||||
unsigned int nbytes, u32 *lbytes,
|
||||
bool *is_chained)
|
||||
unsigned int nbytes, u32 *lbytes)
|
||||
{
|
||||
unsigned int nents = 0;
|
||||
|
||||
while (nbytes && sg_list) {
|
||||
if (sg_list->length) {
|
||||
nents++;
|
||||
/* get the number of bytes in the last entry */
|
||||
*lbytes = nbytes;
|
||||
nbytes -= (sg_list->length > nbytes) ?
|
||||
nbytes : sg_list->length;
|
||||
sg_list = sg_next(sg_list);
|
||||
} else {
|
||||
sg_list = (struct scatterlist *)sg_page(sg_list);
|
||||
if (is_chained)
|
||||
*is_chained = true;
|
||||
}
|
||||
nents++;
|
||||
/* get the number of bytes in the last entry */
|
||||
*lbytes = nbytes;
|
||||
nbytes -= (sg_list->length > nbytes) ?
|
||||
nbytes : sg_list->length;
|
||||
sg_list = sg_next(sg_list);
|
||||
}
|
||||
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
|
||||
return nents;
|
||||
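
The old helper open-coded a walk that also chased legacy chained scatterlist pages; after the cleanup the chain handling is gone and callers that only need an entry count for a byte length can use the generic sg_nents_for_len() instead, as several later hunks do. A hedged sketch of that call pattern (function name hypothetical):

	#include <linux/scatterlist.h>

	/* Count how many sg entries cover `len` bytes of the list. */
	static int demo_count_ents(struct scatterlist *sg, unsigned int len)
	{
		int nents = sg_nents_for_len(sg, len);

		if (nents < 0)		/* -EINVAL: list ends before len bytes */
			return nents;

		return nents;		/* safe to hand to dma_map_sg() etc. */
	}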
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents, lbytes;
	u32 nents;

	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
	sgl_data->num_of_buffers++;
}

static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
			 enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_page() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}

static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		*nents = 1;
		*mapped_nents = 1;
	} else {  /*sg_is_last*/
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
					  &is_chained);
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			*nents = 0;
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (*mapped_nents == 0) {
				*nents = 0;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
						      direction);
			if (*mapped_nents != *nents) {
				*nents = *mapped_nents;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		/* In case of mmu the number of mapped nents might
		 * be changed from the original sgl nents
		 */
		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
		if (*mapped_nents == 0) {
			*nents = 0;
			dev_err(dev, "dma_map_sg() sg buffer failed\n");
			return -ENOMEM;
		}
	}

@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_TO_DEVICE);
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_TO_DEVICE);
				       ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		req->assoclen, req->cryptlen);
	size_to_unmap = req->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src,
		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
				      &dummy, &chained),
		     DMA_BIDIRECTIONAL);
	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
					      &dummy, &chained),
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
	}
}
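
Switching the IV mapping from DMA_TO_DEVICE to DMA_BIDIRECTIONAL matches the new behaviour in this series: the engine now writes the next IV back into the same buffer, so the mapping must allow device writes too. The usual pairing looks like the sketch below (function name hypothetical; the real driver code is in the hunks above):

	#include <linux/dma-mapping.h>

	/* Map an IV the device both consumes and overwrites with the next IV. */
	static int demo_map_iv(struct device *dev, u8 *iv, unsigned int ivsize)
	{
		dma_addr_t iv_dma;

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, iv_dma))
			return -ENOMEM;

		/* ... queue descriptors; HW reads iv and deposits the
		 * follow-on IV in the same buffer ...
		 */

		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
		/* iv[] now holds the chaining IV for a follow-up request. */
		return 0;
	}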
static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
				 unsigned int sgl_nents, unsigned int authsize,
				 u32 last_entry_data_size,
				 bool *is_icv_fragmented)
static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					(authsize - last_entry_data_size) :
					authsize;
	unsigned int nents;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		/* ICV attached to data in last entry (not fragmented!) */
		nents = 0;
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		/* ICV placed in whole last entry (not fragmented!) */
		nents = 1;
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
			MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /*unsupported*/
	}
	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
		(*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
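
The fifty-line nents computation collapses to one observation: the ICV (authentication tag) is fragmented exactly when the payload spans more than one scatterlist entry and the last entry is too short to hold the whole authsize. A small worked illustration of the predicate, with made-up geometries:

	/* Mirror of cc_is_icv_frag() above, exercised on example layouts. */
	static bool icv_frag(unsigned int sgl_nents, unsigned int authsize,
			     u32 last_entry_data_size)
	{
		return sgl_nents > 1 && last_entry_data_size < authsize;
	}

	/*
	 * icv_frag(1, 16, 4)  -> false: single entry, ICV inline with data
	 * icv_frag(3, 16, 16) -> false: ICV fills the last entry exactly
	 * icv_frag(3, 16, 4)  -> true:  12 tag bytes spill into the
	 *                               previous entry
	 */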
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}
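
Mapping req->iv directly was fragile: callers may legitimately pass an IV that lives on their stack, and stack memory must never be DMA-mapped. The fix above duplicates the IV into heap memory before mapping. Reduced to its core, the pattern is (names hypothetical, error handling minimal):

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int demo_dup_and_map_iv(struct device *dev, const u8 *iv,
				       unsigned int ivsize, gfp_t flags,
				       u8 **iv_copy, dma_addr_t *iv_dma)
	{
		*iv_copy = kmemdup(iv, ivsize, flags);	/* heap copy, DMA-safe */
		if (!*iv_copy)
			return -ENOMEM;

		*iv_dma = dma_map_single(dev, *iv_copy, ivsize,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *iv_dma)) {
			kzfree(*iv_copy);	/* zeroize: IVs can be sensitive */
			*iv_copy = NULL;
			return -ENOMEM;
		}
		return 0;
	}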
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	int mapped_nents = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = req->assoclen;
	unsigned int size_of_assoc = areq_ctx->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
		goto chain_assoc_exit;
	}

	if (req->assoclen == 0) {
	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
		goto chain_assoc_exit;
	}

	//iterate over the sgl to see how many entries are for associated data
	//it is assumed that if we reach here , the sgl is already mapped
	sg_index = current_sg->length;
	//the first entry in the scatter list contains all the associated data
	if (sg_index > size_of_assoc) {
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (!current_sg) {
				dev_err(dev, "reached end of sg list. unexpected\n");
				return -EINVAL;
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				req->assoclen, 0, is_last,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;
	if (req->src == req->dst) {
		/*INPLACE*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		/*NON-INPLACE and ENCRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
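
The three DLLI branches differed only in which scatterlist and trailing byte count they consulted, so the refactor computes one (sg, offset) pair and derives both addresses from it. The shared idea, as a standalone sketch (names hypothetical): the tag always occupies the last authsize bytes of the relevant entry.

	static void demo_locate_icv(struct scatterlist *sg, u32 last_bytes,
				    unsigned int authsize,
				    dma_addr_t *icv_dma, u8 **icv_virt)
	{
		u32 offset = last_bytes - authsize;	/* tag ends the entry */

		*icv_dma  = sg_dma_address(sg) + offset;
		*icv_virt = (u8 *)sg_virt(sg) + offset;
	}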
static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				     struct aead_request *req,
				     struct buffer_array *sg_data,
				     u32 *src_last_bytes, u32 *dst_last_bytes,
				     bool is_last_table)
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}
		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
						  areq_ctx->dst.nents,
						  authsize, *dst_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}
		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = req->assoclen;
	u32 size_to_skip = areq_ctx->assoclen;
	struct scatterlist *sgl;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes, &chained);
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->src_sgl->length;
		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->src_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->src_sgl->length;
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map = areq_ctx->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes, &chained);
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dst_sgl->length;
		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->dst_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->dst_sgl->length;
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					       &src_last_bytes,
					       &dst_last_bytes, is_last_table);
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, req->assoclen);
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	size_to_map = req->cryptlen + areq_ctx->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
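
When a hash update delivers less than one block, the driver simply stashes the bytes in a staging buffer until a full block accumulates; sg_nents_for_len() plus sg_copy_to_buffer() is the whole mechanism. A hedged sketch of that pattern (names hypothetical):

	#include <linux/scatterlist.h>

	/* Append a sub-block-sized update into a staging buffer. */
	static int demo_stash(struct scatterlist *src, unsigned int nbytes,
			      u8 *buf, unsigned int *cnt)
	{
		int nents = sg_nents_for_len(src, nbytes);

		if (nents < 0)
			return nents;

		sg_copy_to_buffer(src, nents, &buf[*cnt], nbytes);
		*cnt += nbytes;
		return 0;
	}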
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_buffer_mgr.h
 * Buffer Manager

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
	enum cc_hw_crypto_key key2_slot;
};

struct cc_cpp_key_info {
	u8 slot;
	enum cc_cpp_alg alg;
};

enum cc_key_type {
	CC_UNPROTECTED_KEY,		/* User key */
	CC_HW_PROTECTED_KEY,		/* HW (FDE) key */
	CC_POLICY_PROTECTED_KEY,	/* CPP key */
	CC_INVALID_PROTECTED_KEY	/* Invalid key */
};

struct cc_cipher_ctx {
	struct cc_drvdata *drvdata;
	int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
	int cipher_mode;
	int flow_mode;
	unsigned int flags;
	bool hw_key;
	enum cc_key_type key_type;
	struct cc_user_key_info user;
	struct cc_hw_key_info hw;
	union {
		struct cc_hw_key_info hw;
		struct cc_cpp_key_info cpp;
	};
	struct crypto_shash *shash_tfm;
};

static void cc_cipher_complete(struct device *dev, void *cc_req, int err);

static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);

	return ctx_p->hw_key;
	return ctx_p->key_type;
}

static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
	u8 key3[DES_KEY_SIZE];
};

static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
	switch (slot_num) {
	case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
	return END_OF_KEYS;
}

static u8 cc_slot_to_cpp_key(u8 slot_num)
{
	return (slot_num - CC_FIRST_CPP_KEY_SLOT);
}

static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
{
	if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
		return CC_HW_PROTECTED_KEY;
	else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
		 slot_num <= CC_LAST_CPP_KEY_SLOT)
		return CC_POLICY_PROTECTED_KEY;
	else
		return CC_INVALID_PROTECTED_KEY;
}
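
With two protected-key flavours, the slot number alone decides the key class: HW (FDE) slots and CPP (policy) slots occupy disjoint ranges, and anything else is rejected. A usage sketch against the helpers above (the CC_*_SLOT constants come from the driver headers; the wrapper function is hypothetical):

	static int demo_classify(u8 slot)
	{
		switch (cc_slot_to_key_type(slot)) {
		case CC_HW_PROTECTED_KEY:
			return 0;	/* FDE key: program key1/key2 slots */
		case CC_POLICY_PROTECTED_KEY:
			/* normalized to a 0-based CPP index */
			return cc_slot_to_cpp_key(slot);
		default:
			return -EINVAL;	/* CC_INVALID_PROTECTED_KEY */
		}
	}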
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
			     unsigned int keylen)
{
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,

	/* STAT_PHASE_0: Init and sanity checks */

	/* This checks the size of the hardware key token */
	/* This checks the size of the protected key token */
	if (keylen != sizeof(hki)) {
		dev_err(dev, "Unsupported HW key size %d.\n", keylen);
		dev_err(dev, "Unsupported protected key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (ctx_p->flow_mode != S_DIN_to_AES) {
		dev_err(dev, "HW key not supported for non-AES flows\n");
		return -EINVAL;
	}

	memcpy(&hki, key, keylen);

	/* The real key len for crypto op is the size of the HW key
@@ -286,32 +312,71 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
		return -EINVAL;
	}

	ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
	if (ctx_p->hw.key1_slot == END_OF_KEYS) {
		dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
	ctx_p->keylen = keylen;

	switch (cc_slot_to_key_type(hki.hw_key1)) {
	case CC_HW_PROTECTED_KEY:
		if (ctx_p->flow_mode == S_DIN_to_SM4) {
			dev_err(dev, "Only AES HW protected keys are supported\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki.hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki.hw_key1 == hki.hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki.hw_key1, hki.hw_key2);
				return -EINVAL;
			}

			ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki.hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->key_type = CC_HW_PROTECTED_KEY;
		dev_dbg(dev, "HW protected key %d/%d set.\n",
			ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
		break;

	case CC_POLICY_PROTECTED_KEY:
		if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
			dev_err(dev, "CPP keys not supported in this hardware revision.\n");
			return -EINVAL;
		}

		if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
		    ctx_p->cipher_mode != DRV_CIPHER_CTR) {
			dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
			return -EINVAL;
		}

		ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
		if (ctx_p->flow_mode == S_DIN_to_AES)
			ctx_p->cpp.alg = CC_CPP_AES;
		else /* Must be SM4, due to the sethkey registration */
			ctx_p->cpp.alg = CC_CPP_SM4;
		ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
		dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
			ctx_p->cpp.alg, ctx_p->cpp.slot);
		break;

	default:
		dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
		return -EINVAL;
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
	    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
	    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
		if (hki.hw_key1 == hki.hw_key2) {
			dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
				hki.hw_key1, hki.hw_key2);
			return -EINVAL;
		}
		ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
		if (ctx_p->hw.key2_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key2 number (%d)\n",
				hki.hw_key2);
			return -EINVAL;
		}
	}

	ctx_p->keylen = keylen;
	ctx_p->hw_key = true;
	dev_dbg(dev, "cc_is_hw_key ret 0");

	return 0;
}

@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
		return -EINVAL;
	}

	ctx_p->hw_key = false;
	ctx_p->key_type = CC_UNPROTECTED_KEY;

	/*
	 * Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
	 * HW does the expansion on its own.
	 */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		u32 tmp[DES3_EDE_EXPKEY_WORDS];
		if (keylen == DES3_EDE_KEY_SIZE &&
		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
				      DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
	return 0;
}

static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
{
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		return S_AES_to_DOUT;
	case S_DIN_to_DES:
		return S_DES_to_DOUT;
	case S_DIN_to_SM4:
		return S_SM4_to_DOUT;
	default:
		return ctx_p->flow_mode;
	}
}
static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
				 struct cipher_req_ctx *req_ctx,
				 unsigned int ivsize, struct cc_hw_desc desc[],
				 unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	int cipher_mode = ctx_p->cipher_mode;
	int flow_mode = cc_out_setup_mode(ctx_p);
	int direction = req_ctx->gen_ctx.op_type;
	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;

	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
		return;

	switch (cipher_mode) {
	case DRV_CIPHER_ECB:
		break;
	case DRV_CIPHER_CBC:
	case DRV_CIPHER_CBC_CTS:
	case DRV_CIPHER_CTR:
	case DRV_CIPHER_OFB:
		/* Read next IV */
		hw_desc_init(&desc[*seq_size]);
		set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
		set_cipher_config0(&desc[*seq_size], direction);
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		if (cipher_mode == DRV_CIPHER_CTR ||
		    cipher_mode == DRV_CIPHER_OFB) {
			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
		} else {
			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
		}
		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
		(*seq_size)++;
		break;
	case DRV_CIPHER_XTS:
	case DRV_CIPHER_ESSIV:
	case DRV_CIPHER_BITLOCKER:
		/* IV */
		hw_desc_init(&desc[*seq_size]);
		set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
			      NS_BIT, 1);
		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
		(*seq_size)++;
		break;
	default:
		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
	}
}
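
cc_setup_readiv_desc() is the heart of the output-IV fix for this driver: after the data pass, one extra descriptor writes the engine's chaining state back into the DMA-mapped IV buffer, so the completion path can copy it straight into req->iv instead of recomputing it in software. The caller-visible contract, sketched from the completion hunk further down (names as defined in this diff, shape simplified):

	/* Completion side: HW already deposited the next IV in req_ctx->iv. */
	static void demo_complete(struct skcipher_request *req,
				  struct cipher_req_ctx *req_ctx,
				  unsigned int ivsize)
	{
		memcpy(req->iv, req_ctx->iv, ivsize); /* chained IV to caller */
		kzfree(req_ctx->iv);
	}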
static void cc_setup_state_desc(struct crypto_tfm *tfm,
				struct cipher_req_ctx *req_ctx,
				unsigned int ivsize, unsigned int nbytes,
				struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
		du_size = cc_alg->data_unit;

	switch (cipher_mode) {
	case DRV_CIPHER_ECB:
		break;
	case DRV_CIPHER_CBC:
	case DRV_CIPHER_CBC_CTS:
	case DRV_CIPHER_CTR:
	case DRV_CIPHER_OFB:
		/* Load cipher state */
		/* Load IV */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
			     NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
		}
		(*seq_size)++;
		/*FALLTHROUGH*/
	case DRV_CIPHER_ECB:
		/* Load key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (flow_mode == S_DIN_to_AES) {
			if (cc_is_hw_key(tfm)) {
				set_hw_crypto_key(&desc[*seq_size],
						  ctx_p->hw.key1_slot);
			} else {
				set_din_type(&desc[*seq_size], DMA_DLLI,
					     key_dma_addr, ((key_len == 24) ?
							    AES_MAX_KEY_SIZE :
							    key_len), NS_BIT);
			}
			set_key_size_aes(&desc[*seq_size], key_len);
		} else {
			/*des*/
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     key_len, NS_BIT);
			set_key_size_des(&desc[*seq_size], key_len);
		}
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;
		break;
	case DRV_CIPHER_XTS:
	case DRV_CIPHER_ESSIV:
	case DRV_CIPHER_BITLOCKER:
		/* Load AES key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (cc_is_hw_key(tfm)) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key1_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     (key_len / 2), NS_BIT);
		}
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;

		/* load XEX key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (cc_is_hw_key(tfm)) {
		if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key2_slot);
		} else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
		(*seq_size)++;

		/* Set state */
		/* Load IV */
		hw_desc_init(&desc[*seq_size]);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
	}
}

static void cc_setup_cipher_data(struct crypto_tfm *tfm,
				 struct cipher_req_ctx *req_ctx,
				 struct scatterlist *dst,
				 struct scatterlist *src, unsigned int nbytes,
				 void *areq, struct cc_hw_desc desc[],
				 unsigned int *seq_size)
static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
{
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		return DIN_AES_DOUT;
	case S_DIN_to_DES:
		return DIN_DES_DOUT;
	case S_DIN_to_SM4:
		return DIN_SM4_DOUT;
	default:
		return ctx_p->flow_mode;
	}
}

static void cc_setup_key_desc(struct crypto_tfm *tfm,
			      struct cipher_req_ctx *req_ctx,
			      unsigned int nbytes, struct cc_hw_desc desc[],
			      unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	unsigned int flow_mode = ctx_p->flow_mode;
	int cipher_mode = ctx_p->cipher_mode;
	int flow_mode = ctx_p->flow_mode;
	int direction = req_ctx->gen_ctx.op_type;
	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
	unsigned int key_len = ctx_p->keylen;
	unsigned int din_size;

	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		flow_mode = DIN_AES_DOUT;
		break;
	case S_DIN_to_DES:
		flow_mode = DIN_DES_DOUT;
		break;
	case S_DIN_to_SM4:
		flow_mode = DIN_SM4_DOUT;
		break;
	default:
		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
		return;
	}
	/* Process */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(src), nbytes);
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(dst), nbytes);
	switch (cipher_mode) {
	case DRV_CIPHER_CBC:
	case DRV_CIPHER_CBC_CTS:
	case DRV_CIPHER_CTR:
	case DRV_CIPHER_OFB:
	case DRV_CIPHER_ECB:
		/* Load key */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
			     nbytes, NS_BIT);
		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
			      nbytes, NS_BIT, (!areq ? 0 : 1));
		if (areq)
			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);

		if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
			/* We use the AES key size coding for all CPP algs */
			set_key_size_aes(&desc[*seq_size], key_len);
			set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
			flow_mode = cc_out_flow_mode(ctx_p);
		} else {
			if (flow_mode == S_DIN_to_AES) {
				if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
					set_hw_crypto_key(&desc[*seq_size],
							  ctx_p->hw.key1_slot);
				} else {
					/* CC_POLICY_UNPROTECTED_KEY
					 * Invalid keys are filtered out in
					 * sethkey()
					 */
					din_size = (key_len == 24) ?
						AES_MAX_KEY_SIZE : key_len;

					set_din_type(&desc[*seq_size], DMA_DLLI,
						     key_dma_addr, din_size,
						     NS_BIT);
				}
				set_key_size_aes(&desc[*seq_size], key_len);
			} else {
				/*des*/
				set_din_type(&desc[*seq_size], DMA_DLLI,
					     key_dma_addr, key_len, NS_BIT);
				set_key_size_des(&desc[*seq_size], key_len);
			}
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		}
		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	} else {
		break;
	case DRV_CIPHER_XTS:
	case DRV_CIPHER_ESSIV:
	case DRV_CIPHER_BITLOCKER:
		/* Load AES key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key1_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     (key_len / 2), NS_BIT);
		}
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;
		break;
	default:
		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
	}
}

static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
			       struct cipher_req_ctx *req_ctx,
			       struct scatterlist *dst, struct scatterlist *src,
			       unsigned int nbytes, void *areq,
			       struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		/* bypass */
		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
			&req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
			req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;
	}
}

static void cc_setup_flow_desc(struct crypto_tfm *tfm,
			       struct cipher_req_ctx *req_ctx,
			       struct scatterlist *dst, struct scatterlist *src,
			       unsigned int nbytes, struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	unsigned int flow_mode = cc_out_flow_mode(ctx_p);
	bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
			  ctx_p->cipher_mode == DRV_CIPHER_ECB);

	/* Process */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(src), nbytes);
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(dst), nbytes);
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
			     nbytes, NS_BIT);
		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
			      nbytes, NS_BIT, (!last_desc ? 0 : 1));
		if (last_desc)
			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	} else {
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_MLLI,
			     ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
			set_dout_mlli(&desc[*seq_size],
				      ctx_p->drvdata->mlli_sram_addr,
				      req_ctx->in_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
				      (!last_desc ? 0 : 1));
		} else {
			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
				      (LLI_ENTRY_BYTE_SIZE *
				       req_ctx->in_mlli_nents)),
				      req_ctx->out_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
				      (!last_desc ? 0 : 1));
		}
		if (areq)
		if (last_desc)
			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
	}
}

/*
 * Update a CTR-AES 128 bit counter
 */
static void cc_update_ctr(u8 *ctr, unsigned int increment)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
	    IS_ALIGNED((unsigned long)ctr, 8)) {

		__be64 *high_be = (__be64 *)ctr;
		__be64 *low_be = high_be + 1;
		u64 orig_low = __be64_to_cpu(*low_be);
		u64 new_low = orig_low + (u64)increment;

		*low_be = __cpu_to_be64(new_low);

		if (new_low < orig_low)
			*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
	} else {
		u8 *pos = (ctr + AES_BLOCK_SIZE);
		u8 val;
		unsigned int size;

		for (; increment; increment--)
			for (size = AES_BLOCK_SIZE; size; size--) {
				val = *--pos + 1;
				*pos = val;
				if (val)
					break;
			}
	}
}
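
The deleted cc_update_ctr() kept req->iv in sync for CTR by adding the number of processed blocks to a 128-bit big-endian counter; with the engine now exporting the next IV directly, the software fallback goes away. For reference, the same arithmetic in a compact portable form (a sketch of the technique, not kernel code):

	#include <stdint.h>

	/* Add `blocks` to a 128-bit big-endian counter, byte by byte. */
	static void ctr128_add(uint8_t ctr[16], uint64_t blocks)
	{
		for (int i = 15; i >= 0 && blocks; i--) {
			uint64_t sum = ctr[i] + (blocks & 0xff);

			ctr[i] = (uint8_t)sum;
			/* carry (sum >> 8 is 0 or 1) joins the shifted addend */
			blocks = (blocks >> 8) + (sum >> 8);
		}
	}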
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
	struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
	struct scatterlist *src = req->src;
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
	unsigned int len;

	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);

	switch (ctx_p->cipher_mode) {
	case DRV_CIPHER_CBC:
		/*
		 * The crypto API expects us to set the req->iv to the last
		 * ciphertext block. For encrypt, simply copy from the result.
		 * For decrypt, we must copy from a saved buffer since this
		 * could be an in-place decryption operation and the src is
		 * lost by this point.
		 */
		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
			memcpy(req->iv, req_ctx->backup_info, ivsize);
			kzfree(req_ctx->backup_info);
		} else if (!err) {
			len = req->cryptlen - ivsize;
			scatterwalk_map_and_copy(req->iv, req->dst, len,
						 ivsize, 0);
		}
		break;

	case DRV_CIPHER_CTR:
		/* Compute the counter of the last block */
		len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
		cc_update_ctr((u8 *)req->iv, len);
		break;

	default:
		break;
	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
		memcpy(req->iv, req_ctx->iv, ivsize);
		kzfree(req_ctx->iv);
	}

	kzfree(req_ctx->iv);

	skcipher_request_complete(req, err);
}

@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
	cc_req.user_cb = (void *)cc_cipher_complete;
	cc_req.user_arg = (void *)req;

	/* Setup CPP operation details */
	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
		cc_req.cpp.is_cpp = true;
		cc_req.cpp.alg = ctx_p->cpp.alg;
		cc_req.cpp.slot = ctx_p->cpp.slot;
	}

	/* Setup request context */
	req_ctx->gen_ctx.op_type = direction;

@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,

	/* STAT_PHASE_2: Create sequence */

	/* Setup processing */
	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
	/* Setup IV and XEX key used */
	cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
	/* Setup MLLI line, if needed */
	cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
	/* Setup key */
	cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
	/* Data processing */
	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
			     &seq_len);
	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
	/* Read next IV */
	cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);

	/* STAT_PHASE_3: Lock HW and push sequence */

@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,

exit_process:
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		kzfree(req_ctx->backup_info);
		kzfree(req_ctx->iv);
	}

@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)

static int cc_cipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	unsigned int len;

	memset(req_ctx, 0, sizeof(*req_ctx));

	if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
	    (req->cryptlen >= ivsize)) {

		/* Allocate and save the last IV sized bytes of the source,
		 * which will be lost in case of in-place decryption.
		 */
		req_ctx->backup_info = kzalloc(ivsize, flags);
		if (!req_ctx->backup_info)
			return -ENOMEM;

		len = req->cryptlen - ivsize;
		scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
					 ivsize, 0);
	}

	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
		.sec_func = true,
	},
	{
		.name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		.name = "cbc(psm4)",
		.driver_name = "cbc-psm4-ccree",
		.blocksize = SM4_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_sethkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = CC_HW_KEY_SIZE,
			.max_keysize = CC_HW_KEY_SIZE,
			.ivsize = SM4_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_SM4,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
		.sec_func = true,
	},
	{
		.name = "ctr(psm4)",
		.driver_name = "ctr-psm4-ccree",
		.blocksize = SM4_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_sethkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = CC_HW_KEY_SIZE,
			.max_keysize = CC_HW_KEY_SIZE,
			.ivsize = SM4_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_SM4,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
		.sec_func = true,
	},
};
static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
					ARRAY_SIZE(skcipher_algs));
	for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
		if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & skcipher_algs[alg].std_body))
		    !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
		    (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
			continue;

		dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_cipher.h
 * ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
	u32 in_mlli_nents;
	u32 out_nents;
	u32 out_mlli_nents;
	u8 *backup_info; /*store iv for generated IV flow*/
	u8 *iv;
	struct mlli_params mlli_params;
};

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@

#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX

#define CC_CPP_NUM_SLOTS	8
#define CC_CPP_NUM_ALGS		2

enum cc_cpp_alg {
	CC_CPP_SM4 = 1,
	CC_CPP_AES = 0
};

enum drv_engine_type {
	DRV_ENGINE_NULL = 0,
	DRV_ENGINE_AES = 1,

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
 */
static struct dentry *cc_debugfs_dir;

static struct debugfs_reg32 debug_regs[] = {
static struct debugfs_reg32 ver_sig_regs[] = {
	{ .name = "SIGNATURE" }, /* Must be 0th */
	{ .name = "VERSION" }, /* Must be 1st */
};

static struct debugfs_reg32 pid_cid_regs[] = {
	CC_DEBUG_REG(PERIPHERAL_ID_0),
	CC_DEBUG_REG(PERIPHERAL_ID_1),
	CC_DEBUG_REG(PERIPHERAL_ID_2),
	CC_DEBUG_REG(PERIPHERAL_ID_3),
	CC_DEBUG_REG(PERIPHERAL_ID_4),
	CC_DEBUG_REG(COMPONENT_ID_0),
	CC_DEBUG_REG(COMPONENT_ID_1),
	CC_DEBUG_REG(COMPONENT_ID_2),
	CC_DEBUG_REG(COMPONENT_ID_3),
};

static struct debugfs_reg32 debug_regs[] = {
	CC_DEBUG_REG(HOST_IRR),
	CC_DEBUG_REG(HOST_POWER_DOWN_EN),
	CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_debugfs_ctx *ctx;
	struct debugfs_regset32 *regset;

	debug_regs[0].offset = drvdata->sig_offset;
	debug_regs[1].offset = drvdata->ver_offset;
	struct debugfs_regset32 *regset, *verset;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
	debugfs_create_regset32("regs", 0400, ctx->dir, regset);
	debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);

	drvdata->debugfs = ctx;
	verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
	/* Failing here is not important enough to fail the module load */
	if (!verset)
		goto out;

	if (drvdata->hw_rev <= CC_HW_REV_712) {
		ver_sig_regs[0].offset = drvdata->sig_offset;
		ver_sig_regs[1].offset = drvdata->ver_offset;
		verset->regs = ver_sig_regs;
		verset->nregs = ARRAY_SIZE(ver_sig_regs);
	} else {
		verset->regs = pid_cid_regs;
		verset->nregs = ARRAY_SIZE(pid_cid_regs);
	}
	verset->base = drvdata->cc_base;

	debugfs_create_regset32("version", 0400, ctx->dir, verset);

out:
	drvdata->debugfs = ctx;
	return 0;
}
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,27 +30,47 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");

bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");

static bool cc_sec_disable;
module_param_named(sec_disable, cc_sec_disable, bool, 0600);
MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");

struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
u32 cidr_0123;
u32 pidr_0124;
int std_bodies;
};

#define CC_NUM_IDRS 4

/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
};

static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};

/* Hardware revisions defs. */

/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
.name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};

static const struct cc_hw_data cc713_hw = {
.name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);

static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
{
int i;
union {
u8 regs[CC_NUM_IDRS];
__le32 val;
} idr;

for (i = 0; i < CC_NUM_IDRS; ++i)
idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);

return le32_to_cpu(idr.val);
}

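cc_read_idr() rebuilds a single 32-bit identity word from four byte-wide ID registers by dropping each low byte into a union and reading the result as little-endian. A minimal, runnable user-space sketch of the same composition follows; the sample bytes are the standard ARM component-ID values that compose to 0xB105F00D, matching the cidr_0123 constant above.

#include <stdint.h>
#include <stdio.h>

/* Compose four byte-wide register reads into one LE 32-bit value. */
static uint32_t compose_idr(const uint8_t regs[4])
{
    /* Equivalent of le32_to_cpu() on a byte array: regs[0] is the LSB. */
    return (uint32_t)regs[0] | ((uint32_t)regs[1] << 8) |
           ((uint32_t)regs[2] << 16) | ((uint32_t)regs[3] << 24);
}

int main(void)
{
    /* Low byte of each of COMPONENT_ID_0..3 as read from the device. */
    const uint8_t cid[4] = { 0x0D, 0xF0, 0x05, 0xB1 };

    printf("CIDR0123 = 0x%08X\n", compose_idr(cid)); /* 0xB105F00D */
    return 0;
}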
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)

drvdata->irq = irr;
/* Completion interrupt - most probable */
if (irr & CC_COMP_IRQ_MASK) {
/* Mask AXI completion interrupt - will be unmasked in
* Deferred service handler
if (irr & drvdata->comp_mask) {
/* Mask all completion interrupts - will be unmasked in
* deferred service handler
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
irr &= ~CC_COMP_IRQ_MASK;
cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);

/* Unmask all AXI interrupt sources AXI_CFG1 register */
val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
dev_dbg(dev, "AXIM_CFG=0x%08X\n",
cc_ioread(drvdata, CC_REG(AXIM_CFG)));
/* Unmask all AXI interrupt sources AXI_CFG1 register */
/* AXI interrupt config is obsolete starting at cc7x3 */
if (drvdata->hw_rev <= CC_HW_REV_712) {
val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
dev_dbg(dev, "AXIM_CFG=0x%08X\n",
cc_ioread(drvdata, CC_REG(AXIM_CFG)));
}

/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

/* Unmask relevant interrupt cause */
val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;

if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}

new_drvdata->comp_mask = CC_COMP_IRQ_MASK;

platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;

@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}

new_drvdata->sec_disabled = cc_sec_disable;

if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
if (signature_val != hw_rev->sig) {
val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
if (val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
signature_val, hw_rev->sig);
val, hw_rev->sig);
rc = -EINVAL;
goto post_clk_err;
}
dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
sig_cidr = val;
hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
} else {
/* Verify correct mapping */
val = cc_read_idr(new_drvdata, pidr_0124_offsets);
if (val != hw_rev->pidr_0124) {
dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
val, hw_rev->pidr_0124);
rc = -EINVAL;
goto post_clk_err;
}
hw_rev_pidr = val;

val = cc_read_idr(new_drvdata, cidr_0123_offsets);
if (val != hw_rev->cidr_0123) {
dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
val, hw_rev->cidr_0123);
rc = -EINVAL;
goto post_clk_err;
}
sig_cidr = val;

/* Check security disable state */
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
val &= CC_SECURITY_DISABLED_MASK;
new_drvdata->sec_disabled |= !!val;

if (!new_drvdata->sec_disabled) {
new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
if (new_drvdata->std_bodies & CC_STD_NIST)
new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
}
}

if (new_drvdata->sec_disabled)
dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");

/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
DRV_MODULE_VERSION);
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);

rc = init_cc_regs(new_drvdata, true);
if (rc) {
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

#define CC_CPP_AES_ABORT_MASK ( \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

@@ -81,7 +103,6 @@ enum cc_std_body {

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
#define MAX_ICV_NENTS_SUPPORTED 2

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
* field in the HW descriptor. The DMA engine +8 that value.
*/

struct cc_cpp_req {
bool is_cpp;
enum cc_cpp_alg alg;
u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
struct cc_cpp_req cpp;
};

/**
@@ -136,6 +164,8 @@ struct cc_drvdata {
u32 sig_offset;
u32 ver_offset;
int std_bodies;
bool sec_disabled;
u32 comp_mask;
};

struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
int auth_mode;
u32 min_hw_rev;
enum cc_std_body std_body;
bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
u8 *iv;
enum drv_crypto_direction op_type;
};
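The completion mask is no longer a single fixed bit: the driver ORs the AXIM completion bit with the per-slot AES/SM4 abort bits, so one register test covers every event it must service. The runnable user-space sketch below rebuilds the same BIT()-style composition; the shift values and contiguous slot layout are hypothetical simplifications, since the real SM abort bits are not all adjacent.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Hypothetical shifts standing in for the CC_HOST_IRR_* defines. */
enum { AXIM_COMP_SHIFT = 23, SM_ABORT_BASE = 12, SM_SLOTS = 8 };

int main(void)
{
    uint32_t comp_mask = BIT(AXIM_COMP_SHIFT); /* base completion IRQ */

    /* When security functions are live, also watch SM4 abort slots. */
    for (int slot = 0; slot < SM_SLOTS; slot++)
        comp_mask |= BIT(SM_ABORT_BASE + slot);

    uint32_t irr = BIT(SM_ABORT_BASE + 3);     /* pretend slot 3 aborted */
    if (irr & comp_mask)
        printf("completion-class IRQ pending: IRR=0x%08X mask=0x%08X\n",
               irr, comp_mask);
    return 0;
}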
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)

/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);

kfree(fips_h);
drvdata->fips_handle = NULL;
}

@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}

/*
* This function checks if a CryptoCell TEE FIPS error occurred
* and, in such a case, triggers a system error.
*/
void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
{
struct device *dev = drvdata_to_dev(p_drvdata);

if (!cc_get_tee_fips_status(p_drvdata))
tee_fips_error(dev);
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
u32 irq, val;

irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));

if (irq) {
state = cc_ioread(drvdata, CC_REG(GPR_HOST));

if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
tee_fips_error(dev);
cc_tee_handle_fips_error(drvdata);
}

/* after verifying that there is nothing to do,
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;

fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;

@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);

if (!cc_get_tee_fips_status(p_drvdata))
tee_fips_error(dev);
cc_tee_handle_fips_error(p_drvdata);

return 0;
}
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);

#else /* CONFIG_CRYPTO_FIPS */

@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}

#endif /* CONFIG_CRYPTO_FIPS */
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
u8 *key;
};

/* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)

dev_dbg(dev, "req=%pK\n", req);

cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_req(dev, state, ctx);
req->base.complete(&req->base, err);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_req(dev, state, ctx);
}

ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)

dev_dbg(dev, "req=%pK\n", req);

cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
req->base.complete(&req->base, err);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}

ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)

dev_dbg(dev, "req=%pK\n", req);

cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
req->base.complete(&req->base, err);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
}

ahash_request_complete(req, err);
}

static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
ctx->key_params.key = NULL;

if (keylen) {
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;

ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
dma_map_single(dev, (void *)ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
ctx->key_params.key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}

kzfree(ctx->key_params.key);

return rc;
}

@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,

ctx->key_params.keylen = keylen;

ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;

ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

kzfree(ctx->key_params.key);

return rc;
}
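Both setkey hunks above fix the same class of bug: the key pointer handed to .setkey may live on the caller's stack, and stack memory must never be passed to dma_map_single(). The fix duplicates the key into a kmalloc'd buffer, maps that copy, and releases it with the zeroing kzfree() on every exit path. A hedged kernel-style sketch of the pattern follows; the function and context-struct names are illustrative, not the driver's exact ones.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_ctx {           /* illustrative context, not cc_hash_ctx */
    u8 *key;
    dma_addr_t key_dma;
};

static int example_setkey(struct device *dev, struct example_ctx *ctx,
                          const u8 *key, unsigned int keylen)
{
    /* The caller's buffer may be on its stack: copy to the heap first. */
    ctx->key = kmemdup(key, keylen, GFP_KERNEL);
    if (!ctx->key)
        return -ENOMEM;

    /* Map the driver-owned copy, never the caller's pointer. */
    ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, ctx->key_dma)) {
        kzfree(ctx->key);      /* zeroing free: it held key material */
        ctx->key = NULL;
        return -ENOMEM;
    }
    return 0;
}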
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------

/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
#define CC_HOST_IMR_REG_OFFSET 0xA04UL
#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL

/* IMR */
#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL

/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -132,6 +204,49 @@
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
// BLOCK: ID_REGISTERS
// --------------------------------------
#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))

#define WORD0_VALUE CC_GENMASK(0, VALUE)
#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
END_OF_KEYS = S32_MAX,
};

#define CC_NUM_HW_KEY_SLOTS 4
#define CC_FIRST_HW_KEY_SLOT 0
#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)

#define CC_NUM_CPP_KEY_SLOTS 8
#define CC_FIRST_CPP_KEY_SLOT 16
#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
CC_NUM_CPP_KEY_SLOTS - 1)

enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};

#define CC_CPP_DIN_ADDR 0xFF00FF00UL
#define CC_CPP_DIN_SIZE 0xFF00FFUL

/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -248,6 +262,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
}

/*
* Setup the special CPP descriptor
*
* @pdesc: pointer to the HW descriptor struct
* @slot: slot number
*/
static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
{
pdesc->word[0] |= CC_CPP_DIN_ADDR;

pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);

pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
}

/*
* Set the DIN field of a HW descriptor to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
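The descriptor helpers above pack fields into 32-bit words with FIELD_PREP() against GENMASK()-built masks. The runnable user-space sketch below re-implements just enough of those two macros (simplified versions, safe only for this demo) to show how a DIN size value and a lock-queue flag land in word 1; the bit layout chosen here is hypothetical, not the CryptoCell one.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() macros. */
#define GENMASK(h, l)       (((~0U) << (l)) & (~0U >> (31 - (h))))
#define FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define WORD1_DIN_SIZE   GENMASK(25, 2)   /* hypothetical bit layout */
#define WORD1_LOCK_QUEUE GENMASK(31, 31)

int main(void)
{
    uint32_t word1 = 0;

    word1 |= FIELD_PREP(WORD1_DIN_SIZE, 0xFF00FF); /* CC_CPP_DIN_SIZE */
    word1 |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
    printf("word[1] = 0x%08X\n", word1);
    return 0;
}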
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/ctr.h>
#include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}

ivgen_ctx->pool = NULL_SRAM_ADDR;

/* release "this" context */
kfree(ivgen_ctx);
}

/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;

/* Allocate "this" context */
ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;

drvdata->ivgen_handle = ivgen_ctx;

/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}

drvdata->ivgen_handle = ivgen_ctx;

return cc_init_iv_sram(drvdata);

out:
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF

#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
#include "cc_fips.h"

#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
int rc;

dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);

dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);

/* Enables the device source clk */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}

cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);

rc = cc_resume_req_queue(drvdata);
if (rc) {
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_pm.h
*/
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

return cc_cpp_int_masks[alg][slot];
}

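cc_cpp_int_mask() clamps both indices with array_index_nospec() before the table lookup, so a mispredicted branch cannot be steered into a speculative out-of-bounds read through attacker-influenced alg/slot values. The runnable user-space sketch below shows the same table shape with an ordinary bounds check standing in for the kernel helper; only the clamping intent carries over, not the speculation barrier itself, and the mask values are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define NUM_ALGS  2
#define NUM_SLOTS 8

/* Hypothetical per-slot interrupt bits: alg 0 at bits 0..7, alg 1 at 12..19. */
static const uint32_t int_masks[NUM_ALGS][NUM_SLOTS] = {
    { 1u << 0, 1u << 1, 1u << 2, 1u << 3, 1u << 4, 1u << 5, 1u << 6, 1u << 7 },
    { 1u << 12, 1u << 13, 1u << 14, 1u << 15,
      1u << 16, 1u << 17, 1u << 18, 1u << 19 },
};

static uint32_t int_mask(unsigned int alg, unsigned int slot)
{
    /* The kernel uses array_index_nospec() here, which additionally keeps
     * an out-of-range index from being used under speculation. */
    if (alg >= NUM_ALGS || slot >= NUM_SLOTS)
        return 0;
    return int_masks[alg][slot];
}

int main(void)
{
    printf("mask(1, 3) = 0x%08X\n", int_mask(1, 3));
    return 0;
}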
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);

spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
struct crypto_async_request *req;
void *req;
bool ivgen;
unsigned int total_len;
struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)

while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

spin_unlock(&mgr->bl_lock);

creq = &bli->creq;
req = (struct crypto_async_request *)creq->user_arg;
req = creq->user_arg;

/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
req->complete(req, -EINPROGRESS);
creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}

@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
int rc;
u32 mask;

while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +631,22 @@ static void proc_completions(struct cc_drvdata *drvdata)

cc_req = &request_mgr_handle->req_queue[*tail];

if (cc_req->cpp.is_cpp) {

dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
cc_req->cpp.slot, cc_req->cpp.alg);
mask = cc_cpp_int_mask(cc_req->cpp.alg,
cc_req->cpp.slot);
rc = (drvdata->irq & mask ? -EPERM : 0);
dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
drvdata->irq, rc);
} else {
dev_dbg(dev, "None CPP request completion\n");
rc = 0;
}

if (cc_req->user_cb)
cc_req->user_cb(dev, cc_req->user_arg, 0);
cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;

struct device *dev = drvdata_to_dev(drvdata);
u32 irq;

irq = (drvdata->irq & CC_COMP_IRQ_MASK);
dev_dbg(dev, "Completion handler called!\n");
irq = (drvdata->irq & drvdata->comp_mask);

if (irq & CC_COMP_IRQ_MASK) {
/* To avoid the interrupt from firing as we unmask it,
* we clear it now
*/
cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
/* To avoid the interrupt from firing as we unmask it,
* we clear it now
*/
cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

/* Avoid race with above clear: Test completion counter
* once more
*/
request_mgr_handle->axi_completed +=
cc_axi_comp_count(drvdata);
/* Avoid race with above clear: Test completion counter once more */

while (request_mgr_handle->axi_completed) {
do {
proc_completions(drvdata);
/* At this point (after proc_completions()),
* request_mgr_handle->axi_completed is 0.
*/
request_mgr_handle->axi_completed =
cc_axi_comp_count(drvdata);
} while (request_mgr_handle->axi_completed > 0);
request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

cc_iowrite(drvdata, CC_REG(HOST_ICR),
CC_COMP_IRQ_MASK);
dev_dbg(dev, "AXI completion after updated: %d\n",
request_mgr_handle->axi_completed);

while (request_mgr_handle->axi_completed) {
do {
drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
irq = (drvdata->irq & drvdata->comp_mask);
proc_completions(drvdata);

/* At this point (after proc_completions()),
* request_mgr_handle->axi_completed is 0.
*/
request_mgr_handle->axi_completed +=
cc_axi_comp_count(drvdata);
}
cc_axi_comp_count(drvdata);
} while (request_mgr_handle->axi_completed > 0);

cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}

/* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}

/*
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_request_mgr.h
* Request Manager
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include "cc_driver.h"
#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
/* Free "this" context */
kfree(drvdata->sram_mgr_handle);
/* Nothing needed */
}

/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
}

/* Allocate "this" context */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);

if (!ctx)
return -ENOMEM;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__
@@ -2130,7 +2130,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
shash->tfm = hmacctx->base_hash;
shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
@@ -3517,7 +3516,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
SHASH_DESC_ON_STACK(shash, base_hash);

shash->tfm = base_hash;
shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
align = KEYCTX_ALIGN_PAD(max_authsize);
o_ptr = actx->h_iopad + param.result_size + align;
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}

static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int len)
{
struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
struct hifn_device *dev = ctx->dev;
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(cipher);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

dev->flags &= ~HIFN_FLAG_OLD_KEY;

memcpy(ctx->key, key, len);
ctx->keysize = len;

return 0;
}

static int hifn_handle_req(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cbc,
.decrypt = hifn_decrypt_3des_cbc,
},
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ecb,
.decrypt = hifn_decrypt_3des_ecb,
},
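This series forbids 2-key 3DES (K1 == K2 or K2 == K3) in FIPS-relevant paths via the new __des3_verify_key()/des3_verify_key() helpers instead of ad-hoc per-driver checks. The runnable sketch below implements the underlying distinctness test that the old open-coded XOR checks (like the one removed from ixp4xx further down) expressed; treat it as an illustration of the condition, not a copy of the kernel helper.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reject 3DES keys where K1 == K2 or K2 == K3 (degenerate 2-key forms). */
static int des3_key_is_weak(const uint8_t key[24])
{
    return memcmp(key, key + 8, 8) == 0 ||       /* K1 == K2 */
           memcmp(key + 8, key + 16, 8) == 0;    /* K2 == K3 */
}

int main(void)
{
    uint8_t k[24];

    memcpy(k,      "k1k1k1k1", 8);
    memcpy(k + 8,  "k2k2k2k2", 8);
    memcpy(k + 16, "k2k2k2k2", 8);               /* K2 == K3: weak */
    printf("weak: %d\n", des3_key_is_weak(k));   /* prints 1 */
    return 0;
}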
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
if (keylen != DES_KEY_SIZE * 3)
return -EINVAL;

return sec_alg_skcipher_setkey(tfm, key, keylen,
return unlikely(des3_verify_key(tfm, key)) ?:
sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;

return sec_alg_skcipher_setkey(tfm, key, keylen,
return unlikely(des3_verify_key(tfm, key)) ?:
sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_CBC_192_3KEY);
}
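The hisilicon rewrite above chains the key check and the real setkey with GCC's binary `?:` operator: `a ?: b` evaluates to `a` when `a` is non-zero, else `b`, so a non-zero error from the verifier short-circuits the call. A tiny runnable demonstration of the idiom, with hypothetical function names, assuming a GNU-extension-capable compiler:

#include <stdio.h>

static int verify_key(int weak) { return weak ? -22 : 0; } /* -EINVAL-like */
static int do_setkey(void)      { puts("key programmed"); return 0; }

static int setkey(int weak)
{
    /* GNU extension: returns verify_key()'s value unless it is zero. */
    return verify_key(weak) ?: do_setkey();
}

int main(void)
{
    printf("ok path: %d\n", setkey(0));
    printf("weak path: %d\n", setkey(1));
    return 0;
}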
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
int err;

if (len != DES3_EDE_KEY_SIZE) {
crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
err = des3_verify_key(ctfm, key);
if (unlikely(err))
return err;

/* if context exists and key changed, need to invalidate it */
if (ctx->base.ctxr_dma) {
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
} else if (cipher_cfg & MOD_3DES) {
const u32 *K = (const u32 *)key;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))))
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
return -EINVAL;
}
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
return ret;
}

static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
u32 flags = crypto_ablkcipher_get_flags(tfm);
int err;

err = __des3_verify_key(&flags, key);
if (unlikely(err))
crypto_ablkcipher_set_flags(tfm, flags);

return ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -1175,6 +1180,43 @@ badkey:
return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
struct crypto_authenc_keys keys;
int err;

err = crypto_authenc_extractkeys(&keys, key, keylen);
if (unlikely(err))
goto badkey;

err = -EINVAL;
if (keys.authkeylen > sizeof(ctx->authkey))
goto badkey;

if (keys.enckeylen != DES3_EDE_KEY_SIZE)
goto badkey;

flags = crypto_aead_get_flags(tfm);
err = __des3_verify_key(&flags, keys.enckey);
if (unlikely(err))
goto badkey;

memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
ctx->authkey_len = keys.authkeylen;
ctx->enckey_len = keys.enckeylen;

memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
crypto_aead_set_flags(tfm, flags);
memzero_explicit(&keys, sizeof(keys));
return err;
}

static int aead_encrypt(struct aead_request *req)
{
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = ablk_des3_setkey,
}
}
},
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = ablk_des3_setkey,
}
}
},
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
.setkey = des3_aead_setkey,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
.setkey = des3_aead_setkey,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
cra->setkey = aead_setkey;
cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
cra->decrypt = aead_decrypt;
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;

if (len != DES3_EDE_KEY_SIZE) {
crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
err = des3_verify_key(cipher, key);
if (unlikely(err))
return err;

memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
unsigned int index, padlen;
unsigned int padlen;

buf[0] = 0x80;
/* Pad out to 56 mod 64 */
index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
padlen = mv_cesa_ahash_pad_len(creq);
memset(buf + 1, 0, padlen - 1);

@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);

shash->tfm = bctx->shash;
shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, ctx->bs) ?:

@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
SHASH_DESC_ON_STACK(shash, bctx->shash);

shash->tfm = bctx->shash;
shash->flags = crypto_shash_get_flags(bctx->shash) &
CRYPTO_TFM_REQ_MAY_SLEEP;

if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);

@@ -1,767 +0,0 @@
/*
* Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
*
* The driver is based on information gathered from
* drivers/mxc/security/mxc_scc.c which can be found in
* the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/algapi.h>
#include <crypto/des.h>

/* Secure Memory (SCM) registers */
#define SCC_SCM_RED_START 0x0000
#define SCC_SCM_BLACK_START 0x0004
#define SCC_SCM_LENGTH 0x0008
#define SCC_SCM_CTRL 0x000C
#define SCC_SCM_STATUS 0x0010
#define SCC_SCM_ERROR_STATUS 0x0014
#define SCC_SCM_INTR_CTRL 0x0018
#define SCC_SCM_CFG 0x001C
#define SCC_SCM_INIT_VECTOR_0 0x0020
#define SCC_SCM_INIT_VECTOR_1 0x0024
#define SCC_SCM_RED_MEMORY 0x0400
#define SCC_SCM_BLACK_MEMORY 0x0800

/* Security Monitor (SMN) Registers */
#define SCC_SMN_STATUS 0x1000
#define SCC_SMN_COMMAND 0x1004
#define SCC_SMN_SEQ_START 0x1008
#define SCC_SMN_SEQ_END 0x100C
#define SCC_SMN_SEQ_CHECK 0x1010
#define SCC_SMN_BIT_COUNT 0x1014
#define SCC_SMN_BITBANK_INC_SIZE 0x1018
#define SCC_SMN_BITBANK_DECREMENT 0x101C
#define SCC_SMN_COMPARE_SIZE 0x1020
#define SCC_SMN_PLAINTEXT_CHECK 0x1024
#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
#define SCC_SMN_TIMER_IV 0x102C
#define SCC_SMN_TIMER_CONTROL 0x1030
#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
#define SCC_SMN_TIMER 0x1038

#define SCC_SCM_CTRL_START_CIPHER BIT(2)
#define SCC_SCM_CTRL_CBC_MODE BIT(1)
#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)

#define SCC_SCM_STATUS_LEN_ERR BIT(12)
#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
#define SCC_SCM_STATUS_SEC_KEY BIT(7)
#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
#define SCC_SCM_STATUS_CIPHERING BIT(2)
#define SCC_SCM_STATUS_ZEROIZING BIT(1)
#define SCC_SCM_STATUS_BUSY BIT(0)

#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
#define SCC_SMN_STATE_START 0x0
/* The SMN is zeroizing its RAM during reset */
#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
/* SMN has passed internal checks */
#define SCC_SMN_STATE_HEALTH_CHECK 0x6
/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
#define SCC_SMN_STATE_FAIL 0x9
/* SCC is in secure state. SCM is using secret key. */
#define SCC_SMN_STATE_SECURE 0xA
/* SCC is not secure. SCM is using default key. */
#define SCC_SMN_STATE_NON_SECURE 0xC

#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)

/* Size, in blocks, of Black memory. */
#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
/* Size, in blocks, of Red memory. */
#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
/* Number of bytes per block. */
#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f

#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
#define SCC_SMN_COMMAND_EN_INTR BIT(1)
#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)

#define SCC_KEY_SLOTS 20
#define SCC_MAX_KEY_SIZE 32
#define SCC_KEY_SLOT_SIZE 32

#define SCC_CRC_CCITT_START 0xFFFF

/*
* Offset into each RAM of the base of the area which is not
* used for Stored Keys.
*/
#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)

/* Fixed padding for appending to plaintext to fill out a block */
static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };

enum mxc_scc_state {
SCC_STATE_OK,
SCC_STATE_UNIMPLEMENTED,
SCC_STATE_FAILED
};

struct mxc_scc {
struct device *dev;
void __iomem *base;
struct clk *clk;
bool hw_busy;
spinlock_t lock;
struct crypto_queue queue;
struct crypto_async_request *req;
int block_size_bytes;
int black_ram_size_blocks;
int memory_size_bytes;
int bytes_remaining;

void __iomem *red_memory;
void __iomem *black_memory;
};

struct mxc_scc_ctx {
struct mxc_scc *scc;
struct scatterlist *sg_src;
size_t src_nents;
struct scatterlist *sg_dst;
size_t dst_nents;
unsigned int offset;
unsigned int size;
unsigned int ctrl;
};

struct mxc_scc_crypto_tmpl {
struct mxc_scc *scc;
struct crypto_alg alg;
};

static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;
size_t len;
void __iomem *from;

if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
from = scc->red_memory;
else
from = scc->black_memory;

dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
ctx->dst_nents * 8);
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
from, ctx->size, ctx->offset);
if (!len) {
dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
return -EINVAL;
}

#ifdef DEBUG
print_hex_dump(KERN_ERR,
"red memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->red_memory, ctx->size, 1);
print_hex_dump(KERN_ERR,
"black memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->black_memory, ctx->size, 1);
#endif

ctx->offset += len;

if (ctx->offset < ablkreq->nbytes)
return -EINPROGRESS;

return 0;
}

static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
struct mxc_scc_ctx *ctx)
{
struct mxc_scc *scc = ctx->scc;
int nents;

nents = sg_nents_for_len(req->src, req->nbytes);
if (nents < 0) {
dev_err(scc->dev, "Invalid number of src SG");
return nents;
}
ctx->src_nents = nents;

nents = sg_nents_for_len(req->dst, req->nbytes);
if (nents < 0) {
dev_err(scc->dev, "Invalid number of dst SG");
return nents;
}
ctx->dst_nents = nents;

ctx->size = 0;
ctx->offset = 0;

return 0;
}

static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
struct mxc_scc_ctx *ctx,
int result)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;

scc->req = NULL;
scc->bytes_remaining = scc->memory_size_bytes;

if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
scc->block_size_bytes);

req->complete(req, result);
scc->hw_busy = false;

return 0;
}

static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
struct ablkcipher_request *req)
{
u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
size_t len = min_t(size_t, req->nbytes - ctx->offset,
ctx->scc->bytes_remaining);
unsigned int padding_byte_count = 0;
struct mxc_scc *scc = ctx->scc;
void __iomem *to;

if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
to = scc->black_memory;
else
to = scc->red_memory;

if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
scc->block_size_bytes);

len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
to, len, ctx->offset);
if (!len) {
dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
return -EINVAL;
}

ctx->size = len;

#ifdef DEBUG
dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
print_hex_dump(KERN_ERR,
"init vector0@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
1);
print_hex_dump(KERN_ERR,
"red memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->red_memory, ctx->size, 1);
print_hex_dump(KERN_ERR,
"black memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->black_memory, ctx->size, 1);
#endif

scc->bytes_remaining -= len;

padding_byte_count = len % scc->block_size_bytes;

if (padding_byte_count) {
memcpy(padding_buffer, scc_block_padding, padding_byte_count);
memcpy(to + len, padding_buffer, padding_byte_count);
ctx->size += padding_byte_count;
}

#ifdef DEBUG
print_hex_dump(KERN_ERR,
"data to encrypt@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
to, ctx->size, 1);
#endif

return 0;
}

static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;
int err;

dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
ablkreq->nbytes, ablkreq->src, ablkreq->dst);

writel(0, scc->base + SCC_SCM_ERROR_STATUS);

err = mxc_scc_put_data(ctx, ablkreq);
if (err) {
mxc_scc_ablkcipher_req_complete(req, ctx, err);
return;
}

dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
readl(scc->base + SCC_SCM_RED_START),
readl(scc->base + SCC_SCM_BLACK_START));

/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
scc->base + SCC_SCM_INTR_CTRL);

writel((ctx->size / ctx->scc->block_size_bytes) - 1,
scc->base + SCC_SCM_LENGTH);

dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
ctx->size / ctx->scc->block_size_bytes,
(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
scc->red_memory);

writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
}

static irqreturn_t mxc_scc_int(int irq, void *priv)
{
struct crypto_async_request *req;
struct mxc_scc_ctx *ctx;
struct mxc_scc *scc = priv;
int status;
int ret;

status = readl(scc->base + SCC_SCM_STATUS);

/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);

if (status & SCC_SCM_STATUS_BUSY)
return IRQ_NONE;

req = scc->req;
if (req) {
ctx = crypto_tfm_ctx(req->tfm);
ret = mxc_scc_get_data(ctx, req);
if (ret != -EINPROGRESS)
mxc_scc_ablkcipher_req_complete(req, ctx, ret);
else
mxc_scc_ablkcipher_next(ctx, req);
}

return IRQ_HANDLED;
}

static int mxc_scc_cra_init(struct crypto_tfm *tfm)
{
struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
struct mxc_scc_crypto_tmpl *algt;

algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);

ctx->scc = algt->scc;
return 0;
}

static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
{
struct crypto_async_request *req, *backlog;

if (ctx->scc->hw_busy)
return;

spin_lock_bh(&ctx->scc->lock);
backlog = crypto_get_backlog(&ctx->scc->queue);
req = crypto_dequeue_request(&ctx->scc->queue);
ctx->scc->req = req;
ctx->scc->hw_busy = true;
spin_unlock_bh(&ctx->scc->lock);

if (!req)
return;

if (backlog)
backlog->complete(backlog, -EINPROGRESS);

mxc_scc_ablkcipher_next(ctx, req);
}

static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
int ret;

spin_lock_bh(&ctx->scc->lock);
ret = crypto_enqueue_request(&ctx->scc->queue, req);
spin_unlock_bh(&ctx->scc->lock);

if (ret != -EINPROGRESS)
return ret;

mxc_scc_dequeue_req_unlocked(ctx);

return -EINPROGRESS;
}

static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
struct ablkcipher_request *req)
{
int err;

err = mxc_scc_ablkcipher_req_init(req, ctx);
if (err)
return err;

return mxc_scc_queue_req(ctx, &req->base);
}

static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;

return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;

return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

return mxc_scc_des3_op(ctx, req);
}

static void mxc_scc_hw_init(struct mxc_scc *scc)
{
int offset;

offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;

/* Fill the RED_START register */
writel(offset, scc->base + SCC_SCM_RED_START);

/* Fill the BLACK_START register */
writel(offset, scc->base + SCC_SCM_BLACK_START);

scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
SCC_NON_RESERVED_OFFSET;

scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
SCC_NON_RESERVED_OFFSET;

scc->bytes_remaining = scc->memory_size_bytes;
}

static int mxc_scc_get_config(struct mxc_scc *scc)
{
int config;

config = readl(scc->base + SCC_SCM_CFG);

scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;

scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;

scc->memory_size_bytes = (scc->block_size_bytes *
scc->black_ram_size_blocks) -
SCC_NON_RESERVED_OFFSET;

return 0;
}

static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
{
enum mxc_scc_state state;
int status;

status = readl(scc->base + SCC_SMN_STATUS) &
SCC_SMN_STATUS_STATE_MASK;

/* If in Health Check, try to bring it up to the secure state */
if (status & SCC_SMN_STATE_HEALTH_CHECK) {
/*
* Write a simple algorithm to the Algorithm Sequence
* Checker (ASC)
*/
writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
writel(0x5555, scc->base + SCC_SMN_SEQ_END);
writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);

status = readl(scc->base + SCC_SMN_STATUS) &
SCC_SMN_STATUS_STATE_MASK;
}

switch (status) {
case SCC_SMN_STATE_NON_SECURE:
case SCC_SMN_STATE_SECURE:
state = SCC_STATE_OK;
break;
case SCC_SMN_STATE_FAIL:
state = SCC_STATE_FAILED;
break;
default:
state = SCC_STATE_UNIMPLEMENTED;
break;
}

return state;
}

static struct mxc_scc_crypto_tmpl scc_ecb_des = {
.alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-scc",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mxc_scc_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mxc_scc_cra_init,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.encrypt = mxc_scc_ecb_des_encrypt,
.decrypt = mxc_scc_ecb_des_decrypt,
}
}
};

static struct mxc_scc_crypto_tmpl scc_cbc_des = {
.alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-scc",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mxc_scc_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mxc_scc_cra_init,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.encrypt = mxc_scc_cbc_des_encrypt,
.decrypt = mxc_scc_cbc_des_decrypt,
}
}
};

static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
&scc_ecb_des,
&scc_cbc_des,
};

static int mxc_scc_crypto_register(struct mxc_scc *scc)
{
int i;
int err = 0;

for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
scc_crypto_algs[i]->scc = scc;
err = crypto_register_alg(&scc_crypto_algs[i]->alg);
if (err)
goto err_out;
}

return 0;

err_out:
while (--i >= 0)
crypto_unregister_alg(&scc_crypto_algs[i]->alg);

return err;
}

static void mxc_scc_crypto_unregister(void)
{
unsigned int i;

for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
crypto_unregister_alg(&scc_crypto_algs[i]->alg);
}

static int mxc_scc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mxc_scc *scc;
enum mxc_scc_state state;
int irq;
int ret;
int i;

scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
if (!scc)
return -ENOMEM;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scc->base = devm_ioremap_resource(dev, res);
if (IS_ERR(scc->base))
return PTR_ERR(scc->base);

scc->clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(scc->clk)) {
dev_err(dev, "Could not get ipg clock\n");
return PTR_ERR(scc->clk);
}

ret = clk_prepare_enable(scc->clk);
if (ret)
return ret;

/* clear error status register */
writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);

/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR |
SCC_SCM_INTR_CTRL_MASK_INTR,
scc->base + SCC_SCM_INTR_CTRL);

writel(SCC_SMN_COMMAND_CLR_INTR |
SCC_SMN_COMMAND_EN_INTR,
scc->base + SCC_SMN_COMMAND);

scc->dev = dev;
platform_set_drvdata(pdev, scc);

ret = mxc_scc_get_config(scc);
if (ret)
goto err_out;

state = mxc_scc_get_state(scc);

if (state != SCC_STATE_OK) {
dev_err(dev, "SCC in unusable state %d\n", state);
ret = -EINVAL;
goto err_out;
}

mxc_scc_hw_init(scc);

spin_lock_init(&scc->lock);
/* FIXME: calculate queue from RAM slots */
crypto_init_queue(&scc->queue, 50);

for (i = 0; i < 2; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
dev_err(dev, "failed to get irq resource: %d\n", irq);
ret = irq;
goto err_out;
}

ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
IRQF_ONESHOT, dev_name(dev), scc);
if (ret)
goto err_out;
}

ret = mxc_scc_crypto_register(scc);
if (ret) {
dev_err(dev, "could not register algorithms");
goto err_out;
}

dev_info(dev, "registered successfully.\n");

return 0;

err_out:
clk_disable_unprepare(scc->clk);

return ret;
}

static int mxc_scc_remove(struct platform_device *pdev)
{
struct mxc_scc *scc = platform_get_drvdata(pdev);

mxc_scc_crypto_unregister();

clk_disable_unprepare(scc->clk);

return 0;
}

static const struct of_device_id mxc_scc_dt_ids[] = {
{ .compatible = "fsl,imx25-scc", .data = NULL, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);

static struct platform_driver mxc_scc_driver = {
.probe = mxc_scc_probe,
.remove = mxc_scc_remove,
.driver = {
.name = "mxc-scc",
.of_match_table = mxc_scc_dt_ids,
},
};

module_platform_driver(mxc_scc_driver);
MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
MODULE_LICENSE("GPL v2");

@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)

wake_up_process(sdcp->thread[actx->chan]);

return -EINPROGRESS;
return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)

@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)

struct crypto_async_request *backlog;
struct crypto_async_request *arq;

struct dcp_sha_req_ctx *rctx;

struct ahash_request *req;
int ret, fini;
int ret;

while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);

@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
backlog->complete(backlog, -EINPROGRESS);

if (arq) {
req = ahash_request_cast(arq);
rctx = ahash_request_ctx(req);

ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
}
}

@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);

return -EINPROGRESS;
return ret;
}

static int dcp_sha_update(struct ahash_request *req)

@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;

shash->tfm = child_shash;
shash->flags = crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MAY_SLEEP;

bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);

@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(cipher);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

ctx->enc_type = n2alg->enc_type;

if (keylen != (3 * DES_KEY_SIZE)) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
ctx->key_len = keylen;
memcpy(ctx->key.des3, key, keylen);
return 0;

@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
int ret = 0, max_sync_size;
int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,

@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;

/* Init scatterlist */

@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
int ret = 0, max_sync_size;
int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,

@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;

workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
unsigned int adj_slen = slen;
u8 *src = p->in, *dst = p->out;
u16 padding = be16_to_cpu(g->padding);
int ret, spadding = 0, dpadding = 0;
int ret, spadding = 0;
ktime_t timeout;

if (!slen || !required_len)

@@ -413,7 +413,6 @@ usesw:
spadding = 0;
dst = p->out;
dlen = p->oremain;
dpadding = 0;
if (dlen < required_len) { /* have ignore bytes */
dst = ctx->dbounce;
dlen = BOUNCE_BUFFER_SIZE;

@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));

@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));

@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
}

rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;

@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);

if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
return -EINVAL;

pr_debug("enter, keylen: %d\n", keylen);

/* Do we need to test against weak key? */

@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}

static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int keylen)
{
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 flags;
int err;

pr_debug("enter, keylen: %d\n", keylen);

flags = crypto_ablkcipher_get_flags(cipher);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;

return 0;
}

static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);

@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_u.ablkcipher = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.setkey = omap_des_setkey,
.setkey = omap_des3_setkey,
.encrypt = omap_des_ecb_encrypt,
.decrypt = omap_des_ecb_decrypt,
}

@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = omap_des_setkey,
.setkey = omap_des3_setkey,
.encrypt = omap_des_cbc_encrypt,
.decrypt = omap_des_cbc_decrypt,
}

@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);

shash->tfm = bctx->shash;
shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, bs) ?:

@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);

shash->tfm = tfm;
shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

return crypto_shash_digest(shash, data, len, out);
}

@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

dctx->fallback.tfm = ctx->fallback;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_init(&dctx->fallback);
}

@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
{
struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_update(&dctx->fallback, data, length);
}

@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

dctx->fallback.tfm = ctx->fallback;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(&dctx->fallback, in);
}

@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;

dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;

@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;

dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;

@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];

if (len > DES3_EDE_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

if (unlikely(!des_ekey(tmp, key)) &&
(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {

@@ -771,6 +766,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}

/*
* Set the 3DES key for a block cipher transform. This also performs weak key
* checking if the transform has requested it.
*/
static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int len)
{
struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(cipher);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

memcpy(ctx->key, key, len);
ctx->key_len = len;

return 0;
}

/*
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.

@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
return dev ? dev_get_drvdata(dev) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,

@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = spacc_des_setkey,
.setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,

@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
.setkey = spacc_des_setkey,
.setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,

@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memset(ctx->ipad, 0, block_size);
memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;

if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,

@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = qat_rsa_enc,
.decrypt = qat_rsa_dec,
.sign = qat_rsa_dec,
.verify = qat_rsa_enc,
.set_pub_key = qat_rsa_setpubkey,
.set_priv_key = qat_rsa_setprivkey,
.max_size = qat_rsa_max_size,

@@ -198,6 +198,25 @@ weakkey:
return -EINVAL;
}

static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
unsigned int keylen)
{
struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(ablk);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(ablk, flags);
return err;
}

ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
}

static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
struct crypto_tfm *tfm =

@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_ablkcipher.ivsize = def->ivsize;
alg->cra_ablkcipher.min_keysize = def->min_keysize;
alg->cra_ablkcipher.max_keysize = def->max_keysize;
alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
qce_des3_setkey : qce_ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}

static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
static int rk_des_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];

if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
if (!des_ekey(tmp, key) &&
(tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}

if (keylen == DES_KEY_SIZE) {
if (!des_ekey(tmp, key) &&
(tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
ctx->keylen = keylen;
memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
return 0;
}

static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(cipher);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(cipher, flags);
return err;
}

ctx->keylen = keylen;

@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize;

/* store the iv that need to be updated in chain mode */
if (ctx->mode & RK_CRYPTO_DEC)
/* Store the IV that needs to be updated in chain mode,
* and update the IV buffer to contain the next IV for decryption mode.
*/
if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize);
sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
ivsize, dev->total - ivsize);
}

err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)

@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);

if (ivsize == DES_BLOCK_SIZE)
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
ivsize);
else if (ivsize == AES_BLOCK_SIZE)
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
/* Update the IV buffer to contain the next IV for encryption mode. */
if (!(ctx->mode & RK_CRYPTO_DEC)) {
if (dev->aligned) {
memcpy(req->info, sg_virt(dev->sg_dst) +
dev->sg_dst->length - ivsize, ivsize);
} else {
memcpy(req->info, dev->addr_vir +
dev->count - ivsize, ivsize);
}
}
}

static void rk_update_iv(struct rk_crypto_info *dev)

@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = rk_tdes_setkey,
.setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
}

@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = rk_tdes_setkey,
.setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
}

@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);

shash->tfm = tfm;
shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;

return crypto_shash_digest(shash, data, len, out);
}

@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
u8 state;

if (!IS_ENABLED(DEBUG))
if (!__is_defined(DEBUG))
return;

state = SAHARA_STATUS_GET_STATE(status);

@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
{
int i;

if (!IS_ENABLED(DEBUG))
if (!__is_defined(DEBUG))
return;

for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {

@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
{
int i;

if (!IS_ENABLED(DEBUG))
if (!__is_defined(DEBUG))
return;

for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {

@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
depends on ARCH_STM32
select CRYPTO_HASH
select CRYPTO_ENGINE
select CRYPTO_DES
help
This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.

@@ -137,7 +137,6 @@ struct stm32_cryp {

struct crypto_engine *engine;

struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
struct aead_request *areq;

@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
}
}

static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
struct ablkcipher_request *req = cryp->req;
u32 *tmp = req->info;

if (!tmp)
return;

*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));

if (is_aes(cryp)) {
*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
}
}

static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
{
unsigned int i;

@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);

if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
stm32_cryp_get_iv(cryp);

if (cryp->sgs_copied) {
void *buf_in, *buf_out;
int pages, len;

@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);

if (is_gcm(cryp) || is_ccm(cryp)) {
if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
cryp->areq = NULL;
} else {
else
crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
err);
cryp->req = NULL;
}

memset(cryp->ctx->key, 0, cryp->ctx->keylen);

mutex_unlock(&cryp->lock);
}

static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)

@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
u32 tmp[DES_EXPKEY_WORDS];

if (keylen != DES_KEY_SIZE)
return -EINVAL;
else
return stm32_cryp_setkey(tfm, key, keylen);

if ((crypto_ablkcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
unlikely(!des_ekey(tmp, key))) {
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
return -EINVAL;
}

return stm32_cryp_setkey(tfm, key, keylen);
}

static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
if (keylen != (3 * DES_KEY_SIZE))
return -EINVAL;
else
return stm32_cryp_setkey(tfm, key, keylen);
u32 flags;
int err;

flags = crypto_ablkcipher_get_flags(tfm);
err = __des3_verify_key(&flags, key);
if (unlikely(err)) {
crypto_ablkcipher_set_flags(tfm, flags);
return err;
}

return stm32_cryp_setkey(tfm, key, keylen);
}

static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,

@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;

mutex_lock(&cryp->lock);

rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;

@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,

if (req) {
cryp->req = req;
cryp->areq = NULL;
cryp->total_in = req->nbytes;
cryp->total_out = cryp->total_in;
} else {

@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
* <---------- total_out ----------------->
*/
cryp->areq = areq;
cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
cryp->total_in = areq->assoclen + areq->cryptlen;
if (is_encrypt(cryp))

@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (cryp->in_sg_len < 0) {
dev_err(cryp->dev, "Cannot get in_sg_len\n");
ret = cryp->in_sg_len;
goto out;
return ret;
}

cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
if (cryp->out_sg_len < 0) {
dev_err(cryp->dev, "Cannot get out_sg_len\n");
ret = cryp->out_sg_len;
goto out;
return ret;
}

ret = stm32_cryp_copy_sgs(cryp);
if (ret)
goto out;
return ret;

scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);

@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
}

ret = stm32_cryp_hw_init(cryp);
out:
if (ret)
mutex_unlock(&cryp->lock);

return ret;
}

@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)

cryp->dev = dev;

mutex_init(&cryp->lock);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cryp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cryp->regs))

@@ -181,8 +181,6 @@ struct stm32_hash_dev {
u32 dma_mode;
u32 dma_maxburst;

spinlock_t lock; /* lock to protect queue */

struct ahash_request *req;
struct crypto_engine *engine;

@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)

pm_runtime_get_sync(hdev->dev);

while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();

rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,

@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
if (!areq->cryptlen)
return 0;

if (!areq->iv) {
dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
return -EINVAL;
}

if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;

@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
struct scatterlist *out_sg = areq->dst;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun4i_ss_alg_template *algt;
u32 mode = ctx->mode;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;

@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int obo = 0; /* offset in bufo */
unsigned int obl = 0; /* length of data in bufo */
|
||||
unsigned long flags;
|
||||
bool need_fallback;
|
||||
|
||||
if (!areq->cryptlen)
|
||||
return 0;
|
||||
|
||||
if (!areq->iv) {
|
||||
dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!areq->src || !areq->dst) {
|
||||
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
|
||||
if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
|
||||
need_fallback = true;
|
||||
|
||||
/*
|
||||
* if we have only SGs with size multiple of 4,
|
||||
* we can use the SS optimized function
|
||||
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
out_sg = sg_next(out_sg);
|
||||
}
|
||||
|
||||
if (no_chunk == 1)
|
||||
if (no_chunk == 1 && !need_fallback)
|
||||
return sun4i_ss_opti_poll(areq);
|
||||
|
||||
if (need_fallback) {
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_callback(subreq, areq->base.flags, NULL,
|
||||
NULL);
|
||||
skcipher_request_set_crypt(subreq, areq->src, areq->dst,
|
||||
areq->cryptlen, areq->iv);
|
||||
if (ctx->mode & SS_DECRYPTION)
|
||||
err = crypto_skcipher_decrypt(subreq);
|
||||
else
|
||||
err = crypto_skcipher_encrypt(subreq);
|
||||
skcipher_request_zero(subreq);
|
||||
return err;
|
||||
}
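
The block above is the stock pattern for bouncing a request to a software implementation: build a request for the fallback tfm on the stack, mirror the caller's src/dst/length/IV, run it synchronously, then wipe the on-stack request. Distilled into a standalone helper it reads (a sketch; the helper name and the `bool decrypt` parameter are illustrative, not part of the driver):

static int fallback_crypt(struct crypto_sync_skcipher *fb,
			  struct skcipher_request *areq, bool decrypt)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);

	skcipher_request_set_sync_tfm(subreq, fb);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	err = decrypt ? crypto_skcipher_decrypt(subreq)
		      : crypto_skcipher_encrypt(subreq);
	/* the on-stack request may hold sensitive state; wipe it */
	skcipher_request_zero(subreq);
	return err;
}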

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)

@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	return 0;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	crypto_free_sync_skcipher(op->fallback_tfm);
}
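
Allocation and release of the fallback are paired in init/exit. Note the third argument to the allocation: passing CRYPTO_ALG_NEED_FALLBACK as the *mask* excludes any implementation that itself carries that flag, so the driver can never be handed another hardware driver that would punt right back (a minimal sketch, with "cbc(aes)" as a stand-in algorithm name):

struct crypto_sync_skcipher *fb;

/* mask = CRYPTO_ALG_NEED_FALLBACK: never pick an algorithm that
 * itself needs a fallback, which rules out fallback loops. */
fb = crypto_alloc_sync_skcipher("cbc(aes)", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fb))
	return PTR_ERR(fb);

/* ... use fb ... */

crypto_free_sync_skcipher(fb);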

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)

@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */

@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */

@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int err;

	err = des3_verify_key(tfm, key);
	if (unlikely(err))
		return err;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);

}
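
des3_verify_key() centralizes the 3DES key sanity rules that this series also tightens for FIPS (no 2-key 3DES). The property being checked is simple (a sketch of the idea, not the helper's exact body):

/* Sketch: reject 3DES keys that degenerate to fewer than three
 * distinct DES keys.  K1 == K2 or K2 == K3 collapses EDE toward
 * single DES.  The real helper enforces this when the user set
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, and unconditionally in FIPS mode. */
static bool des3_key_is_degenerate(const u8 *key)
{
	return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
	       !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
		       DES_KEY_SIZE);
}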

@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
			.cra_driver_name = "cbc-aes-sun4i-ss",
			.cra_priority = 300,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 3,
			.cra_init = sun4i_ss_cipher_init,
			.cra_exit = sun4i_ss_cipher_exit,
		}
	}
},

@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
	.decrypt = sun4i_ss_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-sun4i-ss",
		.cra_priority = 300,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
		.cra_module = THIS_MODULE,
		.cra_alignmask = 3,
		.cra_init = sun4i_ss_cipher_init,
		.cra_exit = sun4i_ss_cipher_exit,
	}
}
},

@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
			.cra_driver_name = "cbc-des-sun4i-ss",
			.cra_priority = 300,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 3,
			.cra_init = sun4i_ss_cipher_init,
			.cra_exit = sun4i_ss_cipher_exit,
		}
	}
},

@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
			.cra_driver_name = "ecb-des-sun4i-ss",
			.cra_priority = 300,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 3,
			.cra_init = sun4i_ss_cipher_init,
			.cra_exit = sun4i_ss_cipher_exit,
		}
	}
},

@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
			.cra_driver_name = "cbc-des3-sun4i-ss",
			.cra_priority = 300,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 3,
			.cra_init = sun4i_ss_cipher_init,
			.cra_exit = sun4i_ss_cipher_exit,
		}
	}
},

@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
	.decrypt = sun4i_ss_ecb_des3_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-des3-sun4i-ss",
		.cra_priority = 300,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
		.cra_ctxsize = sizeof(struct sun4i_req_ctx),
		.cra_module = THIS_MODULE,
		.cra_alignmask = 3,
		.cra_init = sun4i_ss_cipher_init,
		.cra_exit = sun4i_ss_cipher_exit,
	}
}
},

@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
	}
	} else {
		/* Since we have the flag final, we can go up to modulo 4 */
		end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}

	/* TODO if SGlen % 4 and !op->len then DMA */
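
The hunk above fixes an unsigned underflow in the `end` computation: `end` is how many bytes of this request may be consumed so that the running total (`op->len` already buffered plus the fresh bytes) lands on a 32-bit word boundary. Worked through with small numbers (values illustrative):

/* end = ((nbytes + op->len) / 4) * 4 - op->len
 *
 *   nbytes = 10, op->len = 3:  (13 / 4) * 4 - 3 = 12 - 3 = 9
 *     -> consume 9 bytes now; 3 + 9 = 12 is word aligned.
 *
 *   nbytes = 1, op->len = 2:   (3 / 4) * 4 - 2 = 0 - 2
 *     -> underflows the unsigned type to a huge value.
 *
 * Hence the new guard: with fewer than 4 fresh bytes, set end = 0
 * and keep buffering instead of writing past the request. */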

@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
	u32 keylen;
	u32 keymode;
	struct sun4i_ss_ctx *ss;
	struct crypto_sync_skcipher *fallback_tfm;
};

struct sun4i_cipher_req_ctx {

@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);

int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen);
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,

@@ -913,6 +913,54 @@ badkey:
	return -EINVAL;
}

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(authenc);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(authenc, flags);
		goto out;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
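
aead_des3_setkey() leans on crypto_authenc_extractkeys() to split the composite authenc key blob. The layout it parses is worth spelling out (a sketch from the authenc convention, not copied from the parser itself):

/* authenc() key blob, as parsed by crypto_authenc_extractkeys():
 *
 *   [ rtattr header: CRYPTO_AUTHENC_KEYA_PARAM, carries enckeylen ]
 *   [ authkey: keylen - header - enckeylen bytes                  ]
 *   [ enckey:  enckeylen bytes                                    ]
 *
 * keys.authkey and keys.enckey are pointers back into the caller's
 * buffer, which is why the function wipes the struct with
 * memzero_explicit() on every exit path. */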

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist

@@ -1527,19 +1575,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

@@ -1552,6 +1587,37 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
	return 0;
}

static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}

static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int keylen)
{
	u32 flags;
	int err;

	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}
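
The two wrappers keep key policy per algorithm and leave the actual key install to the generic ablkcipher_setkey(). Any future cipher slots into the same shape (an illustrative skeleton; `my_cipher` and its key-size constant are hypothetical):

static int my_cipher_setkey(struct crypto_ablkcipher *cipher,
			    const u8 *key, unsigned int keylen)
{
	/* 1. algorithm-specific policy (lengths, weak keys, FIPS) */
	if (keylen != 16)	/* hypothetical fixed key size */
		return -EINVAL;

	/* 2. generic install: DMA-map and stash the key */
	return ablkcipher_setkey(cipher, key, keylen);
}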

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)

@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |

@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |

@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |

@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |

@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |

@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |

@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |

@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |

@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |

@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		break;

@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
					      aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
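
The `?:` in both hunks is GCC's binary conditional ("elvis") operator: `a ?: b` yields `a` when `a` is non-zero, evaluating `a` only once. It lets per-template hooks, where the template set one, override the generic defaults:

/* alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?: ablkcipher_setkey;
 * behaves like: */
if (!alg->cra_ablkcipher.setkey)
	alg->cra_ablkcipher.setkey = ablkcipher_setkey;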

@@ -3,11 +3,7 @@
# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
# * License terms: GNU General Public License (GPL) version 2 */

ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
CFLAGS_cryp_core.o := -DDEBUG
CFLAGS_cryp.o := -DDEBUG
CFLAGS_cryp_irq.o := -DDEBUG
endif
ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG

obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
ux500_cryp-objs :=  cryp.o cryp_irq.o cryp_core.o

@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			const u8 *key, unsigned int keylen)
{
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 *flags = &cipher->base.crt_flags;
	const u32 *K = (const u32 *)key;
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	int i, ret;
	u32 flags;
	int err;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);
	if (keylen != DES3_EDE_KEY_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
			 __func__);
		return -EINVAL;
	}

	/* Checking key interdependency for weak key detection. */
	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
	    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
			 __func__);
		return -EINVAL;
	}
	for (i = 0; i < 3; i++) {
		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
		if (unlikely(ret == 0) &&
		    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
			pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
				 __func__);
			return -EINVAL;
		}
	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	memcpy(ctx->key, key, keylen);
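
The removed open-coded test and the new helper check the same degeneracy, just in one central place. Viewing the key as six 32-bit words K[0..5] (an equivalence sketch, not the helper's literal body):

/* K1 == K2  iff  ((K[0] ^ K[2]) | (K[1] ^ K[3])) == 0
 * K2 == K3  iff  ((K[2] ^ K[4]) | (K[3] ^ K[5])) == 0
 *
 * Either equality collapses EDE toward single DES.  __des3_verify_key()
 * performs the same rejection centrally (and unconditionally in FIPS
 * mode), so drivers no longer need the per-word XOR dance or the
 * per-subkey des_ekey() weak-key loop. */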

@@ -1216,57 +1195,6 @@ static struct cryp_algo_template cryp_algs[] = {
			}
		}
	},
	{
		.algomode = CRYP_ALGO_DES_ECB,
		.crypto = {
			.cra_name = "des",
			.cra_driver_name = "des-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES_KEY_SIZE,
					.max_keysize = DES_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt
				}
			}
		}

	},
	{
		.algomode = CRYP_ALGO_TDES_ECB,
		.crypto = {
			.cra_name = "des3_ede",
			.cra_driver_name = "des3_ede-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES3_EDE_KEY_SIZE,
					.max_keysize = DES3_EDE_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_DES_ECB,
		.crypto = {

@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>

#include "aesp8-ppc.h"

@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
	return ret;
	ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);

	return ret ? -EINVAL : 0;
}
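
Switching the accumulation from `+=` to `|=` is not cosmetic. The assembly setkey routines return their own status codes rather than errnos, so summing them with crypto_cipher_setkey()'s negative errno can in principle cancel to zero or produce a meaningless value; OR-ing only tracks "at least one call failed", and the final line maps any failure to one well-defined errno:

/* Illustrative only (exact return values vary by routine):
 *   ret = -22;  ret += 22;  ->  ret == 0, failure silently lost
 *   ret = -22;  ret |= 22;  ->  ret != 0, reported as -EINVAL
 */
return ret ? -EINVAL : 0;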

static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (in_interrupt()) {
	if (!crypto_simd_usable()) {
		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
	} else {
		preempt_disable();

@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (in_interrupt()) {
	if (!crypto_simd_usable()) {
		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
	} else {
		preempt_disable();
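
crypto_simd_usable() is the generic gate for "may this context touch SIMD/vector state?". The old test baked the polarity in; the new one names the intent and defers to the architecture (an assumed-equivalence sketch):

/* Old:  if (in_interrupt())         use the fallback cipher
 * New:  if (!crypto_simd_usable())  use the fallback cipher
 *
 * crypto_simd_usable() comes from <crypto/internal/simd.h> and wraps
 * the arch's may_use_simd(), so the fallback should now also cover any
 * case where FPU/vector state cannot be saved, not just hard interrupt
 * context. */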

@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
	return ret;
	ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	return ret ? -EINVAL : 0;
}

static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,

@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
	if (!crypto_simd_usable()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);

@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
	if (!crypto_simd_usable()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
Some files were not shown because too many files have changed in this diff.