Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Remove VLA usage
   - Add cryptostat user-space interface
   - Add notifier for new crypto algorithms

  Algorithms:
   - Add OFB mode
   - Remove speck

  Drivers:
   - Remove x86/sha*-mb as they are buggy
   - Remove pcbc(aes) from x86/aesni
   - Improve performance of arm/ghash-ce by up to 85%
   - Implement CTS-CBC in arm64/aes-blk, faster by up to 50%
   - Remove PMULL based arm64/crc32 driver
   - Use PMULL in arm64/crct10dif
   - Add aes-ctr support in s5p-sss
   - Add caam/qi2 driver

  Others:
   - Pick better transform if one becomes available in crc-t10dif"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: chelsio - Update ntx queue received from cxgb4
  crypto: ccree - avoid implicit enum conversion
  crypto: caam - add SPDX license identifier to all files
  crypto: caam/qi - simplify CGR allocation, freeing
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  crypto: arm64/aes-blk - ensure XTS mask is always loaded
  crypto: testmgr - fix sizeof() on COMP_BUF_SIZE
  crypto: chtls - remove set but not used variable 'csk'
  crypto: axis - fix platform_no_drv_owner.cocci warnings
  crypto: x86/aes-ni - fix build error following fpu template removal
  crypto: arm64/aes - fix handling sub-block CTS-CBC inputs
  crypto: caam/qi2 - avoid double export
  crypto: mxs-dcp - Fix AES issues
  crypto: mxs-dcp - Fix SHA null hashes and output length
  crypto: mxs-dcp - Implement sha import/export
  crypto: aegis/generic - fix for big endian systems
  crypto: morus/generic - fix for big endian systems
  crypto: lrw - fix rebase error after out of bounds fix
  crypto: cavium/nitrox - use pci_alloc_irq_vectors() while enabling MSI-X.
  crypto: cavium/nitrox - NITROX command queue changes.
  ...
crypto/Kconfig (101 changes)
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
 	  converts an arbitrary synchronous software crypto algorithm
 	  into an asynchronous algorithm that executes in a kernel thread.
 
-config CRYPTO_MCRYPTD
-	tristate "Software async multi-buffer crypto daemon"
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_HASH
-	select CRYPTO_MANAGER
-	select CRYPTO_WORKQUEUE
-	help
-	  This is a generic software asynchronous crypto daemon that
-	  provides the kernel thread to assist multi-buffer crypto
-	  algorithms for submitting jobs and flushing jobs in multi-buffer
-	  crypto algorithms. Multi-buffer crypto algorithms are executed
-	  in the context of this kernel thread and drivers can post
-	  their crypto request asynchronously to be processed by this daemon.
-
 config CRYPTO_AUTHENC
 	tristate "Authenc support"
 	select CRYPTO_AEAD
@@ -470,6 +456,18 @@ config CRYPTO_LRW
 	  The first 128, 192 or 256 bits in the key are used for AES and the
 	  rest is used to tie each cipher block to its logical position.
 
+config CRYPTO_OFB
+	tristate "OFB support"
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  OFB: the Output Feedback mode makes a block cipher into a synchronous
+	  stream cipher. It generates keystream blocks, which are then XORed
+	  with the plaintext blocks to get the ciphertext. Flipping a bit in the
+	  ciphertext produces a flipped bit in the plaintext at the same
+	  location. This property allows many error correcting codes to function
+	  normally even when applied before encryption.
+
 config CRYPTO_PCBC
 	tristate "PCBC support"
 	select CRYPTO_BLKCIPHER
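[Editor's note: the help text above describes the OFB construction itself. As a minimal sketch of that construction — assuming a made-up toy_block_encrypt() stand-in rather than a real cipher or the kernel's skcipher API — the mode can be expressed in plain C. Because the keystream is independent of the data, encryption and decryption are the same operation.]

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define BLK 16

	/* Stand-in for a block cipher: any keyed permutation works for the demo. */
	static void toy_block_encrypt(const uint8_t key[BLK], uint8_t blk[BLK])
	{
		for (int i = 0; i < BLK; i++)
			blk[i] = (uint8_t)((blk[i] ^ key[i]) + 0x9e);
	}

	/*
	 * OFB: the cipher output is fed back to produce the next keystream
	 * block; ciphertext = plaintext XOR keystream.
	 */
	static void ofb_crypt(const uint8_t key[BLK], const uint8_t iv[BLK],
			      const uint8_t *in, uint8_t *out, size_t len)
	{
		uint8_t feedback[BLK];

		memcpy(feedback, iv, BLK);
		while (len) {
			size_t n = len < BLK ? len : BLK;

			toy_block_encrypt(key, feedback);  /* next keystream block */
			for (size_t i = 0; i < n; i++)
				out[i] = in[i] ^ feedback[i];
			in += n;
			out += n;
			len -= n;
		}
	}

	int main(void)
	{
		uint8_t key[BLK] = { 1 }, iv[BLK] = { 2 };
		uint8_t msg[] = "output feedback mode demo";
		uint8_t ct[sizeof(msg)], pt[sizeof(msg)];

		ofb_crypt(key, iv, msg, ct, sizeof(msg));
		ofb_crypt(key, iv, ct, pt, sizeof(msg));  /* decrypt == encrypt */
		printf("round trip ok: %d\n", memcmp(msg, pt, sizeof(msg)) == 0);
		return 0;
	}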
@@ -848,54 +846,6 @@ config CRYPTO_SHA1_PPC_SPE
 	  SHA-1 secure hash standard (DFIPS 180-4) implemented
 	  using powerpc SPE SIMD instruction set.
 
-config CRYPTO_SHA1_MB
-	tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA1
-	select CRYPTO_HASH
-	select CRYPTO_MCRYPTD
-	help
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using multi-buffer technique. This algorithm computes on
-	  multiple data lanes concurrently with SIMD instructions for
-	  better throughput. It should not be enabled by default but
-	  used when there is significant amount of work to keep the keep
-	  the data lanes filled to get performance benefit. If the data
-	  lanes remain unfilled, a flush operation will be initiated to
-	  process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA256_MB
-	tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA256
-	select CRYPTO_HASH
-	select CRYPTO_MCRYPTD
-	help
-	  SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using multi-buffer technique. This algorithm computes on
-	  multiple data lanes concurrently with SIMD instructions for
-	  better throughput. It should not be enabled by default but
-	  used when there is significant amount of work to keep the keep
-	  the data lanes filled to get performance benefit. If the data
-	  lanes remain unfilled, a flush operation will be initiated to
-	  process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA512_MB
-	tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
-	depends on X86 && 64BIT
-	select CRYPTO_SHA512
-	select CRYPTO_HASH
-	select CRYPTO_MCRYPTD
-	help
-	  SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
-	  using multi-buffer technique. This algorithm computes on
-	  multiple data lanes concurrently with SIMD instructions for
-	  better throughput. It should not be enabled by default but
-	  used when there is significant amount of work to keep the keep
-	  the data lanes filled to get performance benefit. If the data
-	  lanes remain unfilled, a flush operation will be initiated to
-	  process the crypto jobs, adding a slight latency.
-
 config CRYPTO_SHA256
 	tristate "SHA224 and SHA256 digest algorithm"
 	select CRYPTO_HASH
@@ -1133,7 +1083,7 @@ config CRYPTO_AES_NI_INTEL
 
 	  In addition to AES cipher algorithm support, the acceleration
 	  for some popular block cipher mode is supported too, including
-	  ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
+	  ECB, CBC, LRW, XTS. The 64 bit version has additional
 	  acceleration for CTR.
 
 config CRYPTO_AES_SPARC64
@@ -1590,20 +1540,6 @@ config CRYPTO_SM4
 
 	  If unsure, say N.
 
-config CRYPTO_SPECK
-	tristate "Speck cipher algorithm"
-	select CRYPTO_ALGAPI
-	help
-	  Speck is a lightweight block cipher that is tuned for optimal
-	  performance in software (rather than hardware).
-
-	  Speck may not be as secure as AES, and should only be used on systems
-	  where AES is not fast enough.
-
-	  See also: <https://eprint.iacr.org/2013/404.pdf>
-
-	  If unsure, say N.
-
 config CRYPTO_TEA
 	tristate "TEA, XTEA and XETA cipher algorithms"
 	select CRYPTO_ALGAPI
@@ -1875,6 +1811,17 @@ config CRYPTO_USER_API_AEAD
 	  This option enables the user-spaces interface for AEAD
 	  cipher algorithms.
 
+config CRYPTO_STATS
+	bool "Crypto usage statistics for User-space"
+	help
+	  This option enables the gathering of crypto stats.
+	  This will collect:
+	  - encrypt/decrypt size and numbers of symmeric operations
+	  - compress/decompress size and numbers of compress operations
+	  - size and numbers of hash operations
+	  - encrypt/decrypt/sign/verify numbers for asymmetric operations
+	  - generate/seed numbers for rng operations
+
 config CRYPTO_HASH_INFO
 	bool
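[Editor's note: CRYPTO_STATS exposes the collected counters over the crypto netlink socket via the new CRYPTO_MSG_GETSTAT message, wired up in crypto_user_base.c below. A hedged user-space sketch follows — the request layout mirrors the existing CRYPTO_MSG_GETALG convention, the driver name is only an example, and error handling is elided.]

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/cryptouser.h>	/* crypto_user_alg, CRYPTO_MSG_GETSTAT */

	int main(void)
	{
		struct {
			struct nlmsghdr hdr;
			struct crypto_user_alg alg;
		} req;
		struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
		char buf[4096];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

		memset(&req, 0, sizeof(req));
		req.hdr.nlmsg_len = sizeof(req);
		req.hdr.nlmsg_type = CRYPTO_MSG_GETSTAT;
		req.hdr.nlmsg_flags = NLM_F_REQUEST;
		/* example algorithm instance; pick one from /proc/crypto */
		strcpy(req.alg.cru_driver_name, "cbc(aes-generic)");

		sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&nl, sizeof(nl));
		/* reply carries CRYPTOCFGA_STAT_* attributes (struct crypto_stat) */
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		printf("received %zd bytes of statistics\n", n);
		close(fd);
		return 0;
	}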
crypto/Makefile
@@ -54,6 +54,7 @@ cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
+crypto_user-y := crypto_user_base.o crypto_user_stat.o
 obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
@@ -93,7 +94,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
 obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
 obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
 obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
-obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
 obj-$(CONFIG_CRYPTO_DES) += des_generic.o
 obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
@@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
 obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
 obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
 obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SPECK) += speck.o
 obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -143,6 +142,7 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
 obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
 obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
+obj-$(CONFIG_CRYPTO_OFB) += ofb.o
 
 ecdh_generic-y := ecc.o
 ecdh_generic-y += ecdh.o

crypto/aegis.h
@@ -21,7 +21,7 @@
 
 union aegis_block {
 	__le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
-	u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)];
+	__le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)];
 	u8 bytes[AEGIS_BLOCK_SIZE];
 };
 
@@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst,
 				const union aegis_block *src,
 				const union aegis_block *key)
 {
-	u32 *d = dst->words32;
 	const u8  *s  = src->bytes;
-	const u32 *k  = key->words32;
 	const u32 *t0 = crypto_ft_tab[0];
 	const u32 *t1 = crypto_ft_tab[1];
 	const u32 *t2 = crypto_ft_tab[2];
 	const u32 *t3 = crypto_ft_tab[3];
 	u32 d0, d1, d2, d3;
 
-	d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0];
-	d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1];
-	d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2];
-	d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3];
+	d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]];
+	d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]];
+	d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]];
+	d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]];
 
-	d[0] = d0;
-	d[1] = d1;
-	d[2] = d2;
-	d[3] = d3;
+	dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
+	dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
+	dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2];
+	dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3];
 }
 
 #endif /* _CRYPTO_AEGIS_H */
 

crypto/ahash.c
@@ -364,24 +364,35 @@ static int crypto_ahash_op(struct ahash_request *req,
 
 int crypto_ahash_final(struct ahash_request *req)
 {
-	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+	int ret;
+
+	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+	crypto_stat_ahash_final(req, ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
-	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+	int ret;
+
+	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+	crypto_stat_ahash_final(req, ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	int ret;
 
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		return -ENOKEY;
-
-	return crypto_ahash_op(req, tfm->digest);
+		ret = -ENOKEY;
+	else
+		ret = crypto_ahash_op(req, tfm->digest);
+	crypto_stat_ahash_final(req, ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
@@ -550,8 +561,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 {
 	struct crypto_alg *base = &alg->halg.base;
 
-	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
+	    alg->halg.statesize > HASH_MAX_STATESIZE ||
 	    alg->halg.statesize == 0)
 		return -EINVAL;
 

crypto/algapi.c
@@ -57,9 +57,14 @@ static int crypto_check_alg(struct crypto_alg *alg)
 	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
 		return -EINVAL;
 
-	if (alg->cra_blocksize > PAGE_SIZE / 8)
+	/* General maximums for all algs. */
+	if (alg->cra_alignmask > MAX_ALGAPI_ALIGNMASK)
+		return -EINVAL;
+
+	if (alg->cra_blocksize > MAX_ALGAPI_BLOCKSIZE)
 		return -EINVAL;
 
+	/* Lower maximums for specific alg types. */
+	if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+			       CRYPTO_ALG_TYPE_CIPHER) {
+		if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
@@ -253,6 +258,14 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
 	list_add(&alg->cra_list, &crypto_alg_list);
 	list_add(&larval->alg.cra_list, &crypto_alg_list);
 
+	atomic_set(&alg->encrypt_cnt, 0);
+	atomic_set(&alg->decrypt_cnt, 0);
+	atomic64_set(&alg->encrypt_tlen, 0);
+	atomic64_set(&alg->decrypt_tlen, 0);
+	atomic_set(&alg->verify_cnt, 0);
+	atomic_set(&alg->cipher_err_cnt, 0);
+	atomic_set(&alg->sign_cnt, 0);
+
 out:
 	return larval;
@@ -367,6 +380,8 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
 
 	err = wait_for_completion_killable(&larval->completion);
 	WARN_ON(err);
+	if (!err)
+		crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
 
 out:
 	crypto_larval_kill(&larval->alg);

crypto/algboss.c
@@ -274,6 +274,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
 		return cryptomgr_schedule_probe(data);
 	case CRYPTO_MSG_ALG_REGISTER:
 		return cryptomgr_schedule_test(data);
+	case CRYPTO_MSG_ALG_LOADED:
+		break;
 	}
 
 	return NOTIFY_DONE;

crypto/algif_aead.c
@@ -42,7 +42,7 @@
 
 struct aead_tfm {
 	struct crypto_aead *aead;
-	struct crypto_skcipher *null_tfm;
+	struct crypto_sync_skcipher *null_tfm;
 };
 
 static inline bool aead_sufficient_data(struct sock *sk)
@@ -75,13 +75,13 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	return af_alg_sendmsg(sock, msg, size, ivsize);
 }
 
-static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
 				struct scatterlist *src,
 				struct scatterlist *dst, unsigned int len)
 {
-	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
 
-	skcipher_request_set_tfm(skreq, null_tfm);
+	skcipher_request_set_sync_tfm(skreq, null_tfm);
 	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      NULL, NULL);
 	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -99,7 +99,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct af_alg_ctx *ctx = ask->private;
 	struct aead_tfm *aeadc = pask->private;
 	struct crypto_aead *tfm = aeadc->aead;
-	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
+	struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
 	unsigned int i, as = crypto_aead_authsize(tfm);
 	struct af_alg_async_req *areq;
 	struct af_alg_tsgl *tsgl, *tmp;
@@ -478,7 +478,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
 {
 	struct aead_tfm *tfm;
 	struct crypto_aead *aead;
-	struct crypto_skcipher *null_tfm;
+	struct crypto_sync_skcipher *null_tfm;
 
 	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
 	if (!tfm)

crypto/algif_hash.c
@@ -239,7 +239,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	struct ahash_request *req = &ctx->req;
-	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
+	char state[HASH_MAX_STATESIZE];
 	struct sock *sk2;
 	struct alg_sock *ask2;
 	struct hash_ctx *ctx2;

crypto/authenc.c
@@ -33,7 +33,7 @@ struct authenc_instance_ctx {
 struct crypto_authenc_ctx {
 	struct crypto_ahash *auth;
 	struct crypto_skcipher *enc;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 };
 
 struct authenc_request_ctx {
@@ -185,9 +185,9 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
 
-	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_sync_tfm(skreq, ctx->null);
 	skcipher_request_set_callback(skreq, aead_request_flags(req),
 				      NULL, NULL);
 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
@@ -318,7 +318,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_skcipher *enc;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 	int err;
 
 	auth = crypto_spawn_ahash(&ictx->auth);

crypto/authencesn.c
@@ -36,7 +36,7 @@ struct crypto_authenc_esn_ctx {
 	unsigned int reqoff;
 	struct crypto_ahash *auth;
 	struct crypto_skcipher *enc;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 };
 
 struct authenc_esn_request_ctx {
@@ -183,9 +183,9 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
 
-	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_sync_tfm(skreq, ctx->null);
 	skcipher_request_set_callback(skreq, aead_request_flags(req),
 				      NULL, NULL);
 	skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
@@ -341,7 +341,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_skcipher *enc;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 	int err;
 
 	auth = crypto_spawn_ahash(&ictx->auth);

crypto/ccm.c
@@ -50,7 +50,10 @@ struct crypto_ccm_req_priv_ctx {
 	u32 flags;
 	struct scatterlist src[3];
 	struct scatterlist dst[3];
-	struct skcipher_request skreq;
+	union {
+		struct ahash_request ahreq;
+		struct skcipher_request skreq;
+	};
 };
 
 struct cbcmac_tfm_ctx {
@@ -181,7 +184,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
 	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
-	AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
+	struct ahash_request *ahreq = &pctx->ahreq;
 	unsigned int assoclen = req->assoclen;
 	struct scatterlist sg[3];
 	u8 *odata = pctx->odata;
@@ -427,7 +430,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	crypto_aead_set_reqsize(
 		tfm,
 		align + sizeof(struct crypto_ccm_req_priv_ctx) +
-		crypto_skcipher_reqsize(ctr));
+		max(crypto_ahash_reqsize(mac), crypto_skcipher_reqsize(ctr)));
 
 	return 0;
 

crypto/chacha20_generic.c
@@ -18,20 +18,21 @@
 static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
 			     unsigned int bytes)
 {
-	u32 stream[CHACHA20_BLOCK_WORDS];
+	/* aligned to potentially speed up crypto_xor() */
+	u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long));
 
 	if (dst != src)
 		memcpy(dst, src, bytes);
 
 	while (bytes >= CHACHA20_BLOCK_SIZE) {
 		chacha20_block(state, stream);
-		crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
+		crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
 		bytes -= CHACHA20_BLOCK_SIZE;
 		dst += CHACHA20_BLOCK_SIZE;
 	}
 	if (bytes) {
 		chacha20_block(state, stream);
-		crypto_xor(dst, (const u8 *)stream, bytes);
+		crypto_xor(dst, stream, bytes);
 	}
 }
 

crypto/cryptd.c
@@ -76,7 +76,7 @@ struct cryptd_blkcipher_request_ctx {
 
 struct cryptd_skcipher_ctx {
 	atomic_t refcnt;
-	struct crypto_skcipher *child;
+	struct crypto_sync_skcipher *child;
 };
 
 struct cryptd_skcipher_request_ctx {
@@ -449,14 +449,16 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 				  const u8 *key, unsigned int keylen)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_skcipher *child = ctx->child;
+	struct crypto_sync_skcipher *child = ctx->child;
 	int err;
 
-	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(child,
+				       crypto_skcipher_get_flags(parent) &
 					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(child, key, keylen);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+	err = crypto_sync_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent,
+				  crypto_sync_skcipher_get_flags(child) &
 					  CRYPTO_TFM_RES_MASK);
 	return err;
 }
@@ -483,13 +485,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct crypto_sync_skcipher *child = ctx->child;
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_sync_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -511,13 +513,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
 	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child = ctx->child;
-	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+	struct crypto_sync_skcipher *child = ctx->child;
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 
-	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_sync_tfm(subreq, child);
 	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 				      NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -568,7 +570,7 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctx->child = cipher;
+	ctx->child = (struct crypto_sync_skcipher *)cipher;
 	crypto_skcipher_set_reqsize(
 		tfm, sizeof(struct cryptd_skcipher_request_ctx));
 	return 0;
@@ -578,7 +580,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_skcipher(ctx->child);
+	crypto_free_sync_skcipher(ctx->child);
 }
 
 static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -1243,7 +1245,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
 {
 	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 
-	return ctx->child;
+	return &ctx->child->base;
 }
 EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
 
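[Editor's note: the cryptd, crypto_null, authenc(esn), gcm and echainiv hunks in this series all follow one conversion pattern: crypto_skcipher becomes crypto_sync_skcipher so that SYNC_SKCIPHER_REQUEST_ON_STACK can reserve a fixed-size request on the stack instead of a variable-length array. A kernel-context sketch of the resulting calling convention — every crypto identifier below appears in this diff, but the wrapper function itself is hypothetical:]

	#include <crypto/skcipher.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_null_copy(struct scatterlist *src,
				     struct scatterlist *dst, unsigned int len)
	{
		struct crypto_sync_skcipher *tfm;
		int err;

		/* sync allocation: the returned tfm is guaranteed synchronous */
		tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			/* Fixed-size request on the stack: no VLA involved. */
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, src, dst, len, NULL);
			err = crypto_skcipher_encrypt(req);
		}

		crypto_free_sync_skcipher(tfm);
		return err;
	}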
crypto/crypto_null.c
@@ -26,7 +26,7 @@
 #include <linux/string.h>
 
 static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_skcipher *crypto_default_null_skcipher;
+static struct crypto_sync_skcipher *crypto_default_null_skcipher;
 static int crypto_default_null_skcipher_refcnt;
 
 static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -152,16 +152,15 @@ MODULE_ALIAS_CRYPTO("compress_null");
 MODULE_ALIAS_CRYPTO("digest_null");
 MODULE_ALIAS_CRYPTO("cipher_null");
 
-struct crypto_skcipher *crypto_get_default_null_skcipher(void)
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
 {
-	struct crypto_skcipher *tfm;
+	struct crypto_sync_skcipher *tfm;
 
 	mutex_lock(&crypto_default_null_skcipher_lock);
 	tfm = crypto_default_null_skcipher;
 
 	if (!tfm) {
-		tfm = crypto_alloc_skcipher("ecb(cipher_null)",
-					    0, CRYPTO_ALG_ASYNC);
+		tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
 		if (IS_ERR(tfm))
 			goto unlock;
 
@@ -181,7 +180,7 @@ void crypto_put_default_null_skcipher(void)
 {
 	mutex_lock(&crypto_default_null_skcipher_lock);
 	if (!--crypto_default_null_skcipher_refcnt) {
-		crypto_free_skcipher(crypto_default_null_skcipher);
+		crypto_free_sync_skcipher(crypto_default_null_skcipher);
 		crypto_default_null_skcipher = NULL;
 	}
 	mutex_unlock(&crypto_default_null_skcipher_lock);

crypto/crypto_user_base.c (renamed from crypto/crypto_user.c)
@@ -29,6 +29,7 @@
 #include <crypto/internal/rng.h>
 #include <crypto/akcipher.h>
 #include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
 
 #include "internal.h"
 
@@ -37,7 +38,7 @@
 static DEFINE_MUTEX(crypto_cfg_mutex);
 
 /* The crypto netlink socket */
-static struct sock *crypto_nlsk;
+struct sock *crypto_nlsk;
 
 struct crypto_dump_info {
 	struct sk_buff *in_skb;
@@ -46,7 +47,7 @@ struct crypto_dump_info {
 	u16 nlmsg_flags;
 };
 
-static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
 {
 	struct crypto_alg *q, *alg = NULL;
 
@@ -461,6 +462,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
 	[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 	[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 	[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
+	[CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 };
 
 static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
@@ -481,6 +483,9 @@ static const struct crypto_link {
 						       .dump = crypto_dump_report,
 						       .done = crypto_dump_report_done},
 	[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
+	[CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat,
+						   .dump = crypto_dump_reportstat,
+						   .done = crypto_dump_reportstat_done},
 };
 
 static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
crypto/crypto_user_stat.c (new file, 463 lines)
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/cryptouser.h>
+#include <linux/sched.h>
+#include <net/netlink.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
+
+#include "internal.h"
+
+#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))
+
+static DEFINE_MUTEX(crypto_cfg_mutex);
+
+extern struct sock *crypto_nlsk;
+
+struct crypto_dump_info {
+	struct sk_buff *in_skb;
+	struct sk_buff *out_skb;
+	u32 nlmsg_seq;
+	u16 nlmsg_flags;
+};
+
+static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat raead;
+	u64 v64;
+	u32 v32;
+
+	strncpy(raead.type, "aead", sizeof(raead.type));
+
+	v32 = atomic_read(&alg->encrypt_cnt);
+	raead.stat_encrypt_cnt = v32;
+	v64 = atomic64_read(&alg->encrypt_tlen);
+	raead.stat_encrypt_tlen = v64;
+	v32 = atomic_read(&alg->decrypt_cnt);
+	raead.stat_decrypt_cnt = v32;
+	v64 = atomic64_read(&alg->decrypt_tlen);
+	raead.stat_decrypt_tlen = v64;
+	v32 = atomic_read(&alg->aead_err_cnt);
+	raead.stat_aead_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_AEAD,
+		    sizeof(struct crypto_stat), &raead))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rcipher;
+	u64 v64;
+	u32 v32;
+
+	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+	v32 = atomic_read(&alg->encrypt_cnt);
+	rcipher.stat_encrypt_cnt = v32;
+	v64 = atomic64_read(&alg->encrypt_tlen);
+	rcipher.stat_encrypt_tlen = v64;
+	v32 = atomic_read(&alg->decrypt_cnt);
+	rcipher.stat_decrypt_cnt = v32;
+	v64 = atomic64_read(&alg->decrypt_tlen);
+	rcipher.stat_decrypt_tlen = v64;
+	v32 = atomic_read(&alg->cipher_err_cnt);
+	rcipher.stat_cipher_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER,
+		    sizeof(struct crypto_stat), &rcipher))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rcomp;
+	u64 v64;
+	u32 v32;
+
+	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+	v32 = atomic_read(&alg->compress_cnt);
+	rcomp.stat_compress_cnt = v32;
+	v64 = atomic64_read(&alg->compress_tlen);
+	rcomp.stat_compress_tlen = v64;
+	v32 = atomic_read(&alg->decompress_cnt);
+	rcomp.stat_decompress_cnt = v32;
+	v64 = atomic64_read(&alg->decompress_tlen);
+	rcomp.stat_decompress_tlen = v64;
+	v32 = atomic_read(&alg->cipher_err_cnt);
+	rcomp.stat_compress_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS,
+		    sizeof(struct crypto_stat), &rcomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat racomp;
+	u64 v64;
+	u32 v32;
+
+	strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+	v32 = atomic_read(&alg->compress_cnt);
+	racomp.stat_compress_cnt = v32;
+	v64 = atomic64_read(&alg->compress_tlen);
+	racomp.stat_compress_tlen = v64;
+	v32 = atomic_read(&alg->decompress_cnt);
+	racomp.stat_decompress_cnt = v32;
+	v64 = atomic64_read(&alg->decompress_tlen);
+	racomp.stat_decompress_tlen = v64;
+	v32 = atomic_read(&alg->cipher_err_cnt);
+	racomp.stat_compress_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP,
+		    sizeof(struct crypto_stat), &racomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rakcipher;
+	u64 v64;
+	u32 v32;
+
+	strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+	v32 = atomic_read(&alg->encrypt_cnt);
+	rakcipher.stat_encrypt_cnt = v32;
+	v64 = atomic64_read(&alg->encrypt_tlen);
+	rakcipher.stat_encrypt_tlen = v64;
+	v32 = atomic_read(&alg->decrypt_cnt);
+	rakcipher.stat_decrypt_cnt = v32;
+	v64 = atomic64_read(&alg->decrypt_tlen);
+	rakcipher.stat_decrypt_tlen = v64;
+	v32 = atomic_read(&alg->sign_cnt);
+	rakcipher.stat_sign_cnt = v32;
+	v32 = atomic_read(&alg->verify_cnt);
+	rakcipher.stat_verify_cnt = v32;
+	v32 = atomic_read(&alg->akcipher_err_cnt);
+	rakcipher.stat_akcipher_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+		    sizeof(struct crypto_stat), &rakcipher))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rkpp;
+	u32 v;
+
+	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+	v = atomic_read(&alg->setsecret_cnt);
+	rkpp.stat_setsecret_cnt = v;
+	v = atomic_read(&alg->generate_public_key_cnt);
+	rkpp.stat_generate_public_key_cnt = v;
+	v = atomic_read(&alg->compute_shared_secret_cnt);
+	rkpp.stat_compute_shared_secret_cnt = v;
+	v = atomic_read(&alg->kpp_err_cnt);
+	rkpp.stat_kpp_err_cnt = v;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_KPP,
+		    sizeof(struct crypto_stat), &rkpp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rhash;
+	u64 v64;
+	u32 v32;
+
+	strncpy(rhash.type, "ahash", sizeof(rhash.type));
+
+	v32 = atomic_read(&alg->hash_cnt);
+	rhash.stat_hash_cnt = v32;
+	v64 = atomic64_read(&alg->hash_tlen);
+	rhash.stat_hash_tlen = v64;
+	v32 = atomic_read(&alg->hash_err_cnt);
+	rhash.stat_hash_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+		    sizeof(struct crypto_stat), &rhash))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rhash;
+	u64 v64;
+	u32 v32;
+
+	strncpy(rhash.type, "shash", sizeof(rhash.type));
+
+	v32 = atomic_read(&alg->hash_cnt);
+	rhash.stat_hash_cnt = v32;
+	v64 = atomic64_read(&alg->hash_tlen);
+	rhash.stat_hash_tlen = v64;
+	v32 = atomic_read(&alg->hash_err_cnt);
+	rhash.stat_hash_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+		    sizeof(struct crypto_stat), &rhash))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_stat rrng;
+	u64 v64;
+	u32 v32;
+
+	strncpy(rrng.type, "rng", sizeof(rrng.type));
+
+	v32 = atomic_read(&alg->generate_cnt);
+	rrng.stat_generate_cnt = v32;
+	v64 = atomic64_read(&alg->generate_tlen);
+	rrng.stat_generate_tlen = v64;
+	v32 = atomic_read(&alg->seed_cnt);
+	rrng.stat_seed_cnt = v32;
+	v32 = atomic_read(&alg->hash_err_cnt);
+	rrng.stat_rng_err_cnt = v32;
+
+	if (nla_put(skb, CRYPTOCFGA_STAT_RNG,
+		    sizeof(struct crypto_stat), &rrng))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_reportstat_one(struct crypto_alg *alg,
+				 struct crypto_user_alg *ualg,
+				 struct sk_buff *skb)
+{
+	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+		sizeof(ualg->cru_driver_name));
+	strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+		sizeof(ualg->cru_module_name));
+
+	ualg->cru_type = 0;
+	ualg->cru_mask = 0;
+	ualg->cru_flags = alg->cra_flags;
+	ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+		goto nla_put_failure;
+	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+		struct crypto_stat rl;
+
+		strlcpy(rl.type, "larval", sizeof(rl.type));
+		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
+			    sizeof(struct crypto_stat), &rl))
+			goto nla_put_failure;
+		goto out;
+	}
+
+	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		if (crypto_report_aead(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		if (crypto_report_cipher(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_BLKCIPHER:
+		if (crypto_report_cipher(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_CIPHER:
+		if (crypto_report_cipher(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_COMPRESS:
+		if (crypto_report_comp(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_ACOMPRESS:
+		if (crypto_report_acomp(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_SCOMPRESS:
+		if (crypto_report_acomp(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_AKCIPHER:
+		if (crypto_report_akcipher(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_KPP:
+		if (crypto_report_kpp(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		if (crypto_report_ahash(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_HASH:
+		if (crypto_report_shash(skb, alg))
+			goto nla_put_failure;
+		break;
+	case CRYPTO_ALG_TYPE_RNG:
+		if (crypto_report_rng(skb, alg))
+			goto nla_put_failure;
+		break;
+	default:
+		pr_err("ERROR: Unhandled alg %d in %s\n",
+		       alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
+		       __func__);
+	}
+
+out:
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int crypto_reportstat_alg(struct crypto_alg *alg,
+				 struct crypto_dump_info *info)
+{
+	struct sk_buff *in_skb = info->in_skb;
+	struct sk_buff *skb = info->out_skb;
+	struct nlmsghdr *nlh;
+	struct crypto_user_alg *ualg;
+	int err = 0;
+
+	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+			CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
+	if (!nlh) {
+		err = -EMSGSIZE;
+		goto out;
+	}
+
+	ualg = nlmsg_data(nlh);
+
+	err = crypto_reportstat_one(alg, ualg, skb);
+	if (err) {
+		nlmsg_cancel(skb, nlh);
+		goto out;
+	}
+
+	nlmsg_end(skb, nlh);
+
+out:
+	return err;
+}
+
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+		      struct nlattr **attrs)
+{
+	struct crypto_user_alg *p = nlmsg_data(in_nlh);
+	struct crypto_alg *alg;
+	struct sk_buff *skb;
+	struct crypto_dump_info info;
+	int err;
+
+	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+		return -EINVAL;
+
+	alg = crypto_alg_match(p, 0);
+	if (!alg)
+		return -ENOENT;
+
+	err = -ENOMEM;
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!skb)
+		goto drop_alg;
+
+	info.in_skb = in_skb;
+	info.out_skb = skb;
+	info.nlmsg_seq = in_nlh->nlmsg_seq;
+	info.nlmsg_flags = 0;
+
+	err = crypto_reportstat_alg(alg, &info);
+
+drop_alg:
+	crypto_mod_put(alg);
+
+	if (err)
+		return err;
+
+	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct crypto_alg *alg;
+	struct crypto_dump_info info;
+	int err;
+
+	if (cb->args[0])
+		goto out;
+
+	cb->args[0] = 1;
+
+	info.in_skb = cb->skb;
+	info.out_skb = skb;
+	info.nlmsg_seq = cb->nlh->nlmsg_seq;
+	info.nlmsg_flags = NLM_F_MULTI;
+
+	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
+		err = crypto_reportstat_alg(alg, &info);
+		if (err)
+			goto out_err;
+	}
+
+out:
+	return skb->len;
+out_err:
+	return err;
+}
+
+int crypto_dump_reportstat_done(struct netlink_callback *cb)
+{
+	return 0;
+}
+
+MODULE_LICENSE("GPL");

crypto/echainiv.c
@@ -47,9 +47,9 @@ static int echainiv_encrypt(struct aead_request *req)
 	info = req->iv;
 
 	if (req->src != req->dst) {
-		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
 
-		skcipher_request_set_tfm(nreq, ctx->sknull);
+		skcipher_request_set_sync_tfm(nreq, ctx->sknull);
 		skcipher_request_set_callback(nreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(nreq, req->src, req->dst,

crypto/gcm.c
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
 
 struct crypto_rfc4543_ctx {
 	struct crypto_aead *child;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 	u8 nonce[4];
 };
 
@@ -1067,9 +1067,9 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int nbytes = req->assoclen + req->cryptlen -
 			      (enc ? 0 : authsize);
-	SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
 
-	skcipher_request_set_tfm(nreq, ctx->null);
+	skcipher_request_set_sync_tfm(nreq, ctx->null);
 	skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
 	skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
 
@@ -1093,7 +1093,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
 	struct crypto_aead_spawn *spawn = &ictx->aead;
 	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_aead *aead;
-	struct crypto_skcipher *null;
+	struct crypto_sync_skcipher *null;
 	unsigned long align;
 	int err = 0;
 

crypto/internal.h
@@ -26,12 +26,6 @@
 #include <linux/rwsem.h>
 #include <linux/slab.h>
 
-/* Crypto notification events. */
-enum {
-	CRYPTO_MSG_ALG_REQUEST,
-	CRYPTO_MSG_ALG_REGISTER,
-};
-
 struct crypto_instance;
 struct crypto_template;
 
@@ -90,8 +84,6 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
 void *crypto_alloc_tfm(const char *alg_name,
 		       const struct crypto_type *frontend, u32 type, u32 mask);
 
-int crypto_register_notifier(struct notifier_block *nb);
-int crypto_unregister_notifier(struct notifier_block *nb);
 int crypto_probing_notify(unsigned long val, void *v);
 
 unsigned int crypto_alg_extsize(struct crypto_alg *alg);
crypto/lrw.c (351 changes)
@@ -29,8 +29,6 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define LRW_BUFFER_SIZE 128u
-
 #define LRW_BLOCK_SIZE 16
 
 struct priv {
@@ -56,19 +54,7 @@ struct priv {
 };
 
 struct rctx {
-	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
-
 	be128 t;
-
-	be128 *ext;
-
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
-
-	unsigned int left;
-
 	struct skcipher_request subreq;
 };
 
@@ -120,112 +106,68 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
 	return 0;
 }
 
-static inline void inc(be128 *iv)
+/*
+ * Returns the number of trailing '1' bits in the words of the counter, which is
+ * represented by 4 32-bit words, arranged from least to most significant.
+ * At the same time, increments the counter by one.
+ *
+ * For example:
+ *
+ * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
+ * int i = next_index(&counter);
+ * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
+ */
+static int next_index(u32 *counter)
 {
-	be64_add_cpu(&iv->b, 1);
-	if (!iv->b)
-		be64_add_cpu(&iv->a, 1);
-}
+	int i, res = 0;
 
-/* this returns the number of consequative 1 bits starting
- * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
-static inline int get_index128(be128 *block)
-{
-	int x;
-	__be32 *p = (__be32 *) block;
+	for (i = 0; i < 4; i++) {
+		if (counter[i] + 1 != 0)
+			return res + ffz(counter[i]++);
 
-	for (p += 3, x = 0; x < 128; p--, x += 32) {
-		u32 val = be32_to_cpup(p);
-
-		if (!~val)
-			continue;
-
-		return x + ffz(val);
+		counter[i] = 0;
+		res += 32;
 	}
 
-	return x;
+	/*
+	 * If we get here, then x == 128 and we are incrementing the counter
+	 * from all ones to all zeros. This means we must return index 127, i.e.
+	 * the one corresponding to key2*{ 1,...,1 }.
+	 */
+	return 127;
 }
 
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the next_index() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
-	const int bs = LRW_BLOCK_SIZE;
-	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned offset;
-	int err;
-
-	subreq = &rctx->subreq;
-	err = skcipher_walk_virt(&w, subreq, false);
-
-	while (w.nbytes) {
-		unsigned int avail = w.nbytes;
-		be128 *wdst;
-
-		wdst = w.dst.virt.addr;
-
-		do {
-			be128_xor(wdst, buf++, wdst);
-			wdst++;
-		} while ((avail -= bs) >= bs);
-
-		err = skcipher_walk_done(&w, avail);
-	}
-
-	rctx->left -= subreq->cryptlen;
-
-	if (err || !rctx->left)
-		goto out;
-
-	rctx->dst = rctx->dstbuf;
-
-	scatterwalk_done(&w.out, 0, 1);
-	sg = w.out.sg;
-	offset = w.out.offset;
-
-	if (rctx->dst != sg) {
-		rctx->dst[0] = *sg;
-		sg_unmark_end(rctx->dst);
-		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-	}
-	rctx->dst[0].length -= offset - sg->offset;
-	rctx->dst[0].offset = offset;
-
-out:
-	return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct rctx *rctx = skcipher_request_ctx(req);
 	struct priv *ctx = crypto_skcipher_ctx(tfm);
-	be128 *buf = rctx->ext ?: rctx->buf;
-	struct skcipher_request *subreq;
 	const int bs = LRW_BLOCK_SIZE;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	be128 t = rctx->t;
 	struct skcipher_walk w;
-	struct scatterlist *sg;
-	unsigned cryptlen;
-	unsigned offset;
-	be128 *iv;
-	bool more;
+	__be32 *iv;
+	u32 counter[4];
 	int err;
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_tfm(subreq, tfm);
+	if (second_pass) {
+		req = &rctx->subreq;
+		/* set to our TFM to enforce correct alignment: */
+		skcipher_request_set_tfm(req, tfm);
+	}
 
-	cryptlen = subreq->cryptlen;
-	more = rctx->left > cryptlen;
-	if (!more)
-		cryptlen = rctx->left;
+	err = skcipher_walk_virt(&w, req, false);
+	iv = (__be32 *)w.iv;
 
-	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-				   cryptlen, req->iv);
-
-	err = skcipher_walk_virt(&w, subreq, false);
-	iv = w.iv;
+	counter[0] = be32_to_cpu(iv[3]);
+	counter[1] = be32_to_cpu(iv[2]);
+	counter[2] = be32_to_cpu(iv[1]);
+	counter[3] = be32_to_cpu(iv[0]);
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
@@ -236,188 +178,85 @@ static int pre_crypt(struct skcipher_request *req)
 		wdst = w.dst.virt.addr;
 
 		do {
-			*buf++ = rctx->t;
-			be128_xor(wdst++, &rctx->t, wsrc++);
+			be128_xor(wdst++, &t, wsrc++);
 
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
-			be128_xor(&rctx->t, &rctx->t,
-				  &ctx->mulinc[get_index128(iv)]);
-			inc(iv);
+			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
 		} while ((avail -= bs) >= bs);
 
+		if (second_pass && w.nbytes == w.total) {
+			iv[0] = cpu_to_be32(counter[3]);
+			iv[1] = cpu_to_be32(counter[2]);
+			iv[2] = cpu_to_be32(counter[1]);
+			iv[3] = cpu_to_be32(counter[0]);
+		}
+
 		err = skcipher_walk_done(&w, avail);
 	}
 
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-				   cryptlen, NULL);
-
-	if (err || !more)
-		goto out;
-
-	rctx->src = rctx->srcbuf;
-
-	scatterwalk_done(&w.in, 0, 1);
-	sg = w.in.sg;
-	offset = w.in.offset;
-
-	if (rctx->src != sg) {
-		rctx->src[0] = *sg;
-		sg_unmark_end(rctx->src);
-		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-	}
-	rctx->src[0].length -= offset - sg->offset;
-	rctx->src[0].offset = offset;
-
-out:
 	return err;
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
+{
+	return xor_tweak(req, false);
+}
+
+static int xor_tweak_post(struct skcipher_request *req)
+{
+	return xor_tweak(req, true);
+}
+
+static void crypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err)
+		err = xor_tweak_post(req);
+
+	skcipher_request_complete(req, err);
+}
+
+static void init_crypt(struct skcipher_request *req)
 {
 	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-	gfp_t gfp;
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	subreq = &rctx->subreq;
-	skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
-	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-							   GFP_ATOMIC;
-	rctx->ext = NULL;
-
-	subreq->cryptlen = LRW_BUFFER_SIZE;
-	if (req->cryptlen > LRW_BUFFER_SIZE) {
-		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
-		rctx->ext = kmalloc(n, gfp);
-		if (rctx->ext)
-			subreq->cryptlen = n;
-	}
-
-	rctx->src = req->src;
-	rctx->dst = req->dst;
-	rctx->left = req->cryptlen;
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+	/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+	skcipher_request_set_crypt(subreq, req->dst, req->dst,
+				   req->cryptlen, req->iv);
 
 	/* calculate first value of T */
 	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
 	/* T <- I*Key2 */
 	gf128mul_64k_bbe(&rctx->t, ctx->table);
-
-	return 0;
 }
 
-static void exit_crypt(struct skcipher_request *req)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-
-	rctx->left = 0;
-
-	if (rctx->ext)
-		kzfree(rctx->ext);
-}
-
-static int do_encrypt(struct skcipher_request *req, int err)
-{
-	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
-
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_encrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_encrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
 static int encrypt(struct skcipher_request *req)
 {
-	return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
-{
 	struct rctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq;
+	struct skcipher_request *subreq = &rctx->subreq;
 
-	subreq = &rctx->subreq;
-
-	while (!err && rctx->left) {
-		err = pre_crypt(req) ?:
-		      crypto_skcipher_decrypt(subreq) ?:
-		      post_crypt(req);
-
-		if (err == -EINPROGRESS || err == -EBUSY)
-			return err;
-	}
-
-	exit_crypt(req);
-	return err;
-}
-
-static void decrypt_done(struct crypto_async_request *areq, int err)
-{
-	struct skcipher_request *req = areq->data;
-	struct skcipher_request *subreq;
-	struct rctx *rctx;
-
-	rctx = skcipher_request_ctx(req);
-
-	if (err == -EINPROGRESS) {
-		if (rctx->left != req->cryptlen)
-			return;
-		goto out;
-	}
-
-	subreq = &rctx->subreq;
-	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-	err = do_decrypt(req, err ?: post_crypt(req));
-	if (rctx->left)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
+	init_crypt(req);
+	return xor_tweak_pre(req) ?:
+		crypto_skcipher_encrypt(subreq) ?:
+		xor_tweak_post(req);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
-	return do_decrypt(req, init_crypt(req, decrypt_done));
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+
+	init_crypt(req);
+	return xor_tweak_pre(req) ?:
		crypto_skcipher_decrypt(subreq) ?:
+		xor_tweak_post(req);
 }
 
 static int init_tfm(struct crypto_skcipher *tfm)
@@ -543,7 +382,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
-				       (__alignof__(u64) - 1);
+				       (__alignof__(__be32) - 1);
 
 	inst->alg.ivsize = LRW_BLOCK_SIZE;
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
675
crypto/mcryptd.c
@@ -1,675 +0,0 @@
/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_ahash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
		spin_lock_init(&cpu_queue->q_lock);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
	spin_lock(&cpu_queue->q_lock);
	cpu = smp_processor_id();
	rctx->tag.cpu = smp_processor_id();

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	spin_unlock(&cpu_queue->q_lock);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: does one unit of real crypto work (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {

		spin_lock_bh(&cpu_queue->q_lock);
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		spin_unlock_bh(&cpu_queue->q_lock);

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}
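The worker above drains at most MCRYPTD_BATCH requests per invocation and then requeues itself, so one busy queue cannot monopolise a CPU. A hedged user-space sketch of the same budget-then-requeue shape (plain C with a toy single-threaded ring buffer; nothing here is kernel API):

#include <stdio.h>

#define BATCH 9
#define QSIZE 32

static int queue[QSIZE];
static unsigned int head, tail;          /* toy ring buffer indices */

static int queue_pop(int *out)
{
	if (head == tail)
		return 0;
	*out = queue[head++ % QSIZE];
	return 1;
}

static void worker(void)
{
	int i, item;

	for (i = 0; i < BATCH; i++) {
		if (!queue_pop(&item))
			return;                     /* queue drained */
		printf("processed %d\n", item);     /* one unit of real work */
	}
	if (head != tail)
		printf("requeue worker\n");         /* stand-in for queue_work_on() */
}

int main(void)
{
	for (tail = 0; tail < 12; tail++)
		queue[tail % QSIZE] = (int)tail;
	worker();                                   /* processes 9, then requeues */
	return 0;
}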

void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = crypto_ahash_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = crypto_ahash_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	rctx->out = req->result;
	err = crypto_ahash_finup(&rctx->areq);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct hash_alg_common *halg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	halg = ahash_attr_alg(tb[1], type, mask);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	alg = &halg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = halg->digestsize;
	inst->alg.halg.statesize = halg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final = mcryptd_hash_final_enqueue;
	inst->alg.finup = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	if (crypto_hash_alg_has_setkey(halg))
		inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_ahash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
		break;
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_ahash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");
@@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state,
				   struct morus1280_block *tag_xor,
				   u64 assoclen, u64 cryptlen)
{
	u64 assocbits = assoclen * 8;
	u64 cryptbits = cryptlen * 8;

	struct morus1280_block tmp;
	unsigned int i;

	tmp.words[0] = cpu_to_le64(assocbits);
	tmp.words[1] = cpu_to_le64(cryptbits);
	tmp.words[0] = assoclen * 8;
	tmp.words[1] = cryptlen * 8;
	tmp.words[2] = 0;
	tmp.words[3] = 0;
@@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state,
				  struct morus640_block *tag_xor,
				  u64 assoclen, u64 cryptlen)
{
	u64 assocbits = assoclen * 8;
	u64 cryptbits = cryptlen * 8;

	u32 assocbits_lo = (u32)assocbits;
	u32 assocbits_hi = (u32)(assocbits >> 32);
	u32 cryptbits_lo = (u32)cryptbits;
	u32 cryptbits_hi = (u32)(cryptbits >> 32);

	struct morus640_block tmp;
	unsigned int i;

	tmp.words[0] = cpu_to_le32(assocbits_lo);
	tmp.words[1] = cpu_to_le32(assocbits_hi);
	tmp.words[2] = cpu_to_le32(cryptbits_lo);
	tmp.words[3] = cpu_to_le32(cryptbits_hi);
	tmp.words[0] = lower_32_bits(assoclen * 8);
	tmp.words[1] = upper_32_bits(assoclen * 8);
	tmp.words[2] = lower_32_bits(cryptlen * 8);
	tmp.words[3] = upper_32_bits(cryptlen * 8);

	for (i = 0; i < MORUS_BLOCK_WORDS; i++)
		state->s[4].words[i] ^= state->s[0].words[i];
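For reference, lower_32_bits()/upper_32_bits() simply split a 64-bit value in two; a plain-C illustration of how the bit counts above land in the four 32-bit state words (the lengths are made up for the example):

#include <stdint.h>
#include <stdio.h>

/* user-space equivalents of the kernel helpers */
static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t assocbits = 16 * 8;          /* 16 bytes of AAD, in bits */
	uint64_t cryptbits = 4096ULL * 8;     /* 4 KiB of payload, in bits */
	uint32_t words[4] = {
		lo32(assocbits), hi32(assocbits),
		lo32(cryptbits), hi32(cryptbits),
	};

	printf("%u %u %u %u\n", words[0], words[1], words[2], words[3]);
	return 0;
}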
225
crypto/ofb.c
Normal file
@@ -0,0 +1,225 @@
// SPDX-License-Identifier: GPL-2.0

/*
 * OFB: Output FeedBack mode
 *
 * Copyright (C) 2018 ARM Limited or its affiliates.
 * All rights reserved.
 *
 * Based loosely on public domain code gleaned from libtomcrypt
 * (https://github.com/libtom/libtomcrypt).
 */

#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct crypto_ofb_ctx {
	struct crypto_cipher *child;
	int cnt;
};


static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
				      struct skcipher_walk *walk,
				      struct crypto_cipher *tfm)
{
	int bsize = crypto_cipher_blocksize(tfm);
	int nbytes = walk->nbytes;

	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		if (ctx->cnt == bsize) {
			if (nbytes < bsize)
				break;
			crypto_cipher_encrypt_one(tfm, iv, iv);
			ctx->cnt = 0;
		}
		*dst = *src ^ iv[ctx->cnt];
		src++;
		dst++;
		ctx->cnt++;
	} while (--nbytes);
	return nbytes;
}

static int crypto_ofb_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize;
	struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	int ret = 0;

	bsize = crypto_cipher_blocksize(child);
	ctx->cnt = bsize;

	ret = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
		ret = skcipher_walk_done(&walk, ret);
	}

	return ret;
}

/* OFB encrypt and decrypt are identical */
static int crypto_ofb_decrypt(struct skcipher_request *req)
{
	return crypto_ofb_encrypt(req);
}
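The segment routine above keeps the keystream in the IV buffer and re-encrypts it in place whenever cnt reaches the block size. A self-contained user-space sketch of the same OFB loop (the "cipher" here is a toy stand-in, not a real block cipher; only the structure of the mode is the point):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16   /* block size, as with AES */

/* toy stand-in for a real block cipher's encrypt-one-block primitive */
static void toy_block_encrypt(const uint8_t key[BS], uint8_t block[BS])
{
	for (int i = 0; i < BS; i++)
		block[i] = (uint8_t)(block[i] ^ key[i] ^ 0x5a);
}

/* OFB: repeatedly encrypt the IV to make keystream, XOR it with the data.
 * The very same routine performs both encryption and decryption. */
static void ofb_crypt(const uint8_t key[BS], uint8_t iv[BS],
		      uint8_t *data, size_t len)
{
	size_t used = BS;   /* force keystream generation on the first byte */

	while (len--) {
		if (used == BS) {
			toy_block_encrypt(key, iv); /* iv becomes keystream */
			used = 0;
		}
		*data++ ^= iv[used++];
	}
}

int main(void)
{
	uint8_t key[BS] = { 1, 2, 3 };
	uint8_t iv[BS]  = { 9, 8, 7 };
	uint8_t iv2[BS];
	uint8_t msg[] = "OFB round trip";

	memcpy(iv2, iv, BS);
	ofb_crypt(key, iv, msg, sizeof(msg));   /* encrypt */
	ofb_crypt(key, iv2, msg, sizeof(msg));  /* decrypt with a fresh IV copy */
	printf("%s\n", msg);                    /* prints the original text */
	return 0;
}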

static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
	struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	return 0;
}

static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->child);
}

static void crypto_ofb_free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct crypto_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		goto err_free_inst;

	mask = CRYPTO_ALG_TYPE_MASK |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
	err = PTR_ERR(alg);
	if (IS_ERR(alg))
		goto err_free_inst;

	spawn = skcipher_instance_ctx(inst);
	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	crypto_mod_put(alg);
	if (err)
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
	if (err)
		goto err_drop_spawn;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	/* We access the data as u32s when xoring. */
	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;

	inst->alg.ivsize = alg->cra_blocksize;
	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);

	inst->alg.init = crypto_ofb_init_tfm;
	inst->alg.exit = crypto_ofb_exit_tfm;

	inst->alg.setkey = crypto_ofb_setkey;
	inst->alg.encrypt = crypto_ofb_encrypt;
	inst->alg.decrypt = crypto_ofb_decrypt;

	inst->free = crypto_ofb_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_spawn(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_ofb_tmpl = {
	.name = "ofb",
	.create = crypto_ofb_create,
	.module = THIS_MODULE,
};

static int __init crypto_ofb_module_init(void)
{
	return crypto_register_template(&crypto_ofb_tmpl);
}

static void __exit crypto_ofb_module_exit(void)
{
	crypto_unregister_template(&crypto_ofb_tmpl);
}

module_init(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OFB block cipher algorithm");
MODULE_ALIAS_CRYPTO("ofb");
@@ -50,6 +50,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
	}

	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
	crypto_stat_rng_seed(tfm, err);
out:
	kzfree(buf);
	return err;
@@ -261,15 +261,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
	if (!req_ctx->out_buf) {
		kfree(req_ctx->in_buf);
		return -ENOMEM;
	}

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			    ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);
@@ -73,9 +73,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
	info = req->iv;

	if (req->src != req->dst) {
		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
		SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

		skcipher_request_set_tfm(nreq, ctx->sknull);
		skcipher_request_set_sync_tfm(nreq, ctx->sknull);
		skcipher_request_set_callback(nreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(nreq, req->src, req->dst,
@@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);

static inline unsigned int shash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	typedef u8 __aligned_largest u8_aligned;
	return len + (mask & ~(__alignof__(u8_aligned) - 1));
}

static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
				  unsigned int len)
{
@@ -88,11 +81,17 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
	unsigned long alignmask = crypto_shash_alignmask(tfm);
	unsigned int unaligned_len = alignmask + 1 -
				     ((unsigned long)data & alignmask);
	u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
		__aligned_largest;
	/*
	 * We cannot count on __aligned() working for large values:
	 * https://patchwork.kernel.org/patch/9507697/
	 */
	u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
	int err;

	if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
		return -EINVAL;

	if (unaligned_len > len)
		unaligned_len = len;
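PTR_ALIGN(&ubuf[0], alignmask + 1) in the hunk above rounds the buffer start up to the required power-of-two boundary, and the WARN_ON then proves the aligned region still fits in the fixed-size array that replaced the VLA. A plain-C illustration of that rounding arithmetic (user-space, just for the pointer math):

#include <stdint.h>
#include <stdio.h>

/* round p up to the next multiple of a; a must be a power of two */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + (a - 1)) & ~(a - 1));
}

int main(void)
{
	char ubuf[64 + 15];                 /* slack for worst-case misalignment */
	char *buf = ptr_align(ubuf, 16);

	printf("raw %p, aligned %p\n", (void *)ubuf, (void *)buf);
	return 0;
}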
@@ -124,11 +123,17 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
	unsigned long alignmask = crypto_shash_alignmask(tfm);
	struct shash_alg *shash = crypto_shash_alg(tfm);
	unsigned int ds = crypto_shash_digestsize(tfm);
	u8 ubuf[shash_align_buffer_size(ds, alignmask)]
		__aligned_largest;
	/*
	 * We cannot count on __aligned() working for large values:
	 * https://patchwork.kernel.org/patch/9507697/
	 */
	u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
	int err;

	if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
		return -EINVAL;

	err = shash->final(desc, buf);
	if (err)
		goto out;
@@ -458,9 +463,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->digestsize > PAGE_SIZE / 8 ||
	    alg->descsize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 8)
	if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->descsize > HASH_MAX_DESCSIZE ||
	    alg->statesize > HASH_MAX_STATESIZE)
		return -EINVAL;

	base->cra_type = &crypto_shash_type;
@@ -949,6 +949,30 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
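A hedged usage sketch of this new helper (not part of the patch): pairing crypto_alloc_sync_skcipher() with an on-stack request, the same pattern the seqiv hunk above switches to. The algorithm name, buffers, and key here are illustrative only, and error paths are trimmed:

static int sync_skcipher_demo(struct scatterlist *src, struct scatterlist *dst,
			      unsigned int len, u8 *iv,
			      const u8 *key, unsigned int keylen)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		/* safe on the stack: reqsize was checked at alloc time */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}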

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
307
crypto/speck.c
@@ -1,307 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Speck: a lightweight block cipher
 *
 * Copyright (c) 2018 Google, Inc
 *
 * Speck has 10 variants, including 5 block sizes. For now we only implement
 * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
 * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
 * and a key size of K bits. The Speck128 variants are believed to be the most
 * secure variants, and they use the same block size and key sizes as AES. The
 * Speck64 variants are less secure, but on 32-bit processors are usually
 * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
 * secure and/or not as well suited for implementation on either 32-bit or
 * 64-bit processors, so are omitted.
 *
 * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
 * https://eprint.iacr.org/2013/404.pdf
 *
 * In a correspondence, the Speck designers have also clarified that the words
 * should be interpreted in little-endian format, and the words should be
 * ordered such that the first word of each block is 'y' rather than 'x', and
 * the first key word (rather than the last) becomes the first round key.
 */

#include <asm/unaligned.h>
#include <crypto/speck.h>
#include <linux/bitops.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>

/* Speck128 */

static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
{
	*x = ror64(*x, 8);
	*x += *y;
	*x ^= k;
	*y = rol64(*y, 3);
	*y ^= *x;
}

static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
{
	*y ^= *x;
	*y = ror64(*y, 3);
	*x ^= k;
	*x -= *y;
	*x = rol64(*x, 8);
}
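speck128_unround() must undo speck128_round() exactly, which is easy to check in isolation. A stand-alone round-trip sketch (plain C; the key and data words are arbitrary test values, not vectors from the paper):

#include <stdint.h>
#include <stdio.h>

static uint64_t ror64(uint64_t v, unsigned n) { return (v >> n) | (v << (64 - n)); }
static uint64_t rol64(uint64_t v, unsigned n) { return (v << n) | (v >> (64 - n)); }

static void round_fn(uint64_t *x, uint64_t *y, uint64_t k)
{
	*x = ror64(*x, 8); *x += *y; *x ^= k;
	*y = rol64(*y, 3); *y ^= *x;
}

/* each step of round_fn, inverted, in reverse order */
static void unround_fn(uint64_t *x, uint64_t *y, uint64_t k)
{
	*y ^= *x;  *y = ror64(*y, 3);
	*x ^= k;   *x -= *y;  *x = rol64(*x, 8);
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL, y = 0xfedcba9876543210ULL;
	uint64_t x0 = x, y0 = y;

	round_fn(&x, &y, 0x1111111111111111ULL);
	unround_fn(&x, &y, 0x1111111111111111ULL);
	printf("%s\n", (x == x0 && y == y0) ? "inverted OK" : "mismatch");
	return 0;
}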

void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
			     u8 *out, const u8 *in)
{
	u64 y = get_unaligned_le64(in);
	u64 x = get_unaligned_le64(in + 8);
	int i;

	for (i = 0; i < ctx->nrounds; i++)
		speck128_round(&x, &y, ctx->round_keys[i]);

	put_unaligned_le64(y, out);
	put_unaligned_le64(x, out + 8);
}
EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);

static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
}

void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
			     u8 *out, const u8 *in)
{
	u64 y = get_unaligned_le64(in);
	u64 x = get_unaligned_le64(in + 8);
	int i;

	for (i = ctx->nrounds - 1; i >= 0; i--)
		speck128_unround(&x, &y, ctx->round_keys[i]);

	put_unaligned_le64(y, out);
	put_unaligned_le64(x, out + 8);
}
EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);

static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
}

int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
			   unsigned int keylen)
{
	u64 l[3];
	u64 k;
	int i;

	switch (keylen) {
	case SPECK128_128_KEY_SIZE:
		k = get_unaligned_le64(key);
		l[0] = get_unaligned_le64(key + 8);
		ctx->nrounds = SPECK128_128_NROUNDS;
		for (i = 0; i < ctx->nrounds; i++) {
			ctx->round_keys[i] = k;
			speck128_round(&l[0], &k, i);
		}
		break;
	case SPECK128_192_KEY_SIZE:
		k = get_unaligned_le64(key);
		l[0] = get_unaligned_le64(key + 8);
		l[1] = get_unaligned_le64(key + 16);
		ctx->nrounds = SPECK128_192_NROUNDS;
		for (i = 0; i < ctx->nrounds; i++) {
			ctx->round_keys[i] = k;
			speck128_round(&l[i % 2], &k, i);
		}
		break;
	case SPECK128_256_KEY_SIZE:
		k = get_unaligned_le64(key);
		l[0] = get_unaligned_le64(key + 8);
		l[1] = get_unaligned_le64(key + 16);
		l[2] = get_unaligned_le64(key + 24);
		ctx->nrounds = SPECK128_256_NROUNDS;
		for (i = 0; i < ctx->nrounds; i++) {
			ctx->round_keys[i] = k;
			speck128_round(&l[i % 3], &k, i);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_speck128_setkey);

static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
			   unsigned int keylen)
{
	return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
}

/* Speck64 */

static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
{
	*x = ror32(*x, 8);
	*x += *y;
	*x ^= k;
	*y = rol32(*y, 3);
	*y ^= *x;
}

static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
{
	*y ^= *x;
	*y = ror32(*y, 3);
	*x ^= k;
	*x -= *y;
	*x = rol32(*x, 8);
}

void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
			    u8 *out, const u8 *in)
{
	u32 y = get_unaligned_le32(in);
	u32 x = get_unaligned_le32(in + 4);
	int i;

	for (i = 0; i < ctx->nrounds; i++)
		speck64_round(&x, &y, ctx->round_keys[i]);

	put_unaligned_le32(y, out);
	put_unaligned_le32(x, out + 4);
}
EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);

static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
}

void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
			    u8 *out, const u8 *in)
{
	u32 y = get_unaligned_le32(in);
	u32 x = get_unaligned_le32(in + 4);
	int i;

	for (i = ctx->nrounds - 1; i >= 0; i--)
		speck64_unround(&x, &y, ctx->round_keys[i]);

	put_unaligned_le32(y, out);
	put_unaligned_le32(x, out + 4);
}
EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);

static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
}

int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
			  unsigned int keylen)
{
	u32 l[3];
	u32 k;
	int i;

	switch (keylen) {
	case SPECK64_96_KEY_SIZE:
		k = get_unaligned_le32(key);
		l[0] = get_unaligned_le32(key + 4);
		l[1] = get_unaligned_le32(key + 8);
		ctx->nrounds = SPECK64_96_NROUNDS;
		for (i = 0; i < ctx->nrounds; i++) {
			ctx->round_keys[i] = k;
			speck64_round(&l[i % 2], &k, i);
		}
		break;
	case SPECK64_128_KEY_SIZE:
		k = get_unaligned_le32(key);
		l[0] = get_unaligned_le32(key + 4);
		l[1] = get_unaligned_le32(key + 8);
		l[2] = get_unaligned_le32(key + 12);
		ctx->nrounds = SPECK64_128_NROUNDS;
		for (i = 0; i < ctx->nrounds; i++) {
			ctx->round_keys[i] = k;
			speck64_round(&l[i % 3], &k, i);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_speck64_setkey);

static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
}

/* Algorithm definitions */

static struct crypto_alg speck_algs[] = {
	{
		.cra_name		= "speck128",
		.cra_driver_name	= "speck128-generic",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
		.cra_blocksize		= SPECK128_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct speck128_tfm_ctx),
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.cipher = {
				.cia_min_keysize	= SPECK128_128_KEY_SIZE,
				.cia_max_keysize	= SPECK128_256_KEY_SIZE,
				.cia_setkey		= speck128_setkey,
				.cia_encrypt		= speck128_encrypt,
				.cia_decrypt		= speck128_decrypt
			}
		}
	}, {
		.cra_name		= "speck64",
		.cra_driver_name	= "speck64-generic",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
		.cra_blocksize		= SPECK64_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct speck64_tfm_ctx),
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.cipher = {
				.cia_min_keysize	= SPECK64_96_KEY_SIZE,
				.cia_max_keysize	= SPECK64_128_KEY_SIZE,
				.cia_setkey		= speck64_setkey,
				.cia_encrypt		= speck64_encrypt,
				.cia_decrypt		= speck64_decrypt
			}
		}
	}
};

static int __init speck_module_init(void)
{
	return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
}

static void __exit speck_module_exit(void)
{
	crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
}

module_init(speck_module_init);
module_exit(speck_module_exit);

MODULE_DESCRIPTION("Speck block cipher (generic)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("speck128");
MODULE_ALIAS_CRYPTO("speck128-generic");
MODULE_ALIAS_CRYPTO("speck64");
MODULE_ALIAS_CRYPTO("speck64-generic");
@@ -76,8 +76,7 @@ static char *check[] = {
	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
	"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
	NULL
	"lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
};

static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
@@ -1103,6 +1102,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
			break;
		}

		if (speed[i].klen)
			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -1733,6 +1735,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
		ret += tcrypt_test("xts(aes)");
		ret += tcrypt_test("ctr(aes)");
		ret += tcrypt_test("rfc3686(ctr(aes))");
		ret += tcrypt_test("ofb(aes)");
		break;

	case 11:
@@ -1878,10 +1881,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
		ret += tcrypt_test("ecb(seed)");
		break;

	case 44:
		ret += tcrypt_test("zlib");
		break;

	case 45:
		ret += tcrypt_test("rfc4309(ccm(aes))");
		break;
@@ -2033,6 +2032,8 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
		break;
	case 191:
		ret += tcrypt_test("ecb(sm4)");
		ret += tcrypt_test("cbc(sm4)");
		ret += tcrypt_test("ctr(sm4)");
		break;
	case 200:
		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -2282,6 +2283,20 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
				  num_mb);
		break;

	case 218:
		test_cipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
				  speed_template_16);
		test_cipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
				  speed_template_16);
		test_cipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
				  speed_template_16);
		test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
				  speed_template_16);
		test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
				  speed_template_16);
		test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
				  speed_template_16);
		break;
	case 300:
		if (alg) {
			test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -51,6 +51,7 @@ static struct cipher_speed_template des3_speed_template[] = {
 * Cipher speed tests
 */
static u8 speed_template_8[] = {8, 0};
static u8 speed_template_16[] = {16, 0};
static u8 speed_template_24[] = {24, 0};
static u8 speed_template_8_16[] = {8, 16, 0};
static u8 speed_template_8_32[] = {8, 32, 0};
@@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(output, 0, sizeof(COMP_BUF_SIZE));
		memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
		memset(output, 0, COMP_BUF_SIZE);
		memset(decomp_output, 0, COMP_BUF_SIZE);
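The bug fixed here is easy to reproduce: sizeof applied to a numeric macro measures the type of the constant, not the buffer it names, so the old memset cleared only the first few bytes. A tiny user-space check (COMP_BUF_SIZE is redefined locally just for the demo):

#include <stdio.h>

#define COMP_BUF_SIZE 512

int main(void)
{
	/* sizeof(COMP_BUF_SIZE) is sizeof(int), typically 4, not 512 */
	printf("%zu vs %d\n", sizeof(COMP_BUF_SIZE), COMP_BUF_SIZE);
	return 0;
}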

		ilen = ctemplate[i].inlen;
		ret = crypto_comp_compress(tfm, ctemplate[i].input,
@@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
		memset(decomp_output, 0, COMP_BUF_SIZE);

		ilen = dtemplate[i].inlen;
		ret = crypto_comp_decompress(tfm, dtemplate[i].input,
@@ -2661,6 +2661,12 @@ static const struct alg_test_desc alg_test_descs[] = {
		.suite = {
			.cipher = __VECS(serpent_cbc_tv_template)
		},
	}, {
		.alg = "cbc(sm4)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(sm4_cbc_tv_template)
		}
	}, {
		.alg = "cbc(twofish)",
		.test = alg_test_skcipher,
@@ -2784,6 +2790,12 @@ static const struct alg_test_desc alg_test_descs[] = {
		.suite = {
			.cipher = __VECS(serpent_ctr_tv_template)
		}
	}, {
		.alg = "ctr(sm4)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(sm4_ctr_tv_template)
		}
	}, {
		.alg = "ctr(twofish)",
		.test = alg_test_skcipher,
@@ -3037,18 +3049,6 @@ static const struct alg_test_desc alg_test_descs[] = {
		.suite = {
			.cipher = __VECS(sm4_tv_template)
		}
	}, {
		.alg = "ecb(speck128)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(speck128_tv_template)
		}
	}, {
		.alg = "ecb(speck64)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(speck64_tv_template)
		}
	}, {
		.alg = "ecb(tea)",
		.test = alg_test_skcipher,
@@ -3576,18 +3576,6 @@ static const struct alg_test_desc alg_test_descs[] = {
		.suite = {
			.cipher = __VECS(serpent_xts_tv_template)
		}
	}, {
		.alg = "xts(speck128)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(speck128_xts_tv_template)
		}
	}, {
		.alg = "xts(speck64)",
		.test = alg_test_skcipher,
		.suite = {
			.cipher = __VECS(speck64_xts_tv_template)
		}
	}, {
		.alg = "xts(twofish)",
		.test = alg_test_skcipher,
863
crypto/testmgr.h
@@ -24,8 +24,6 @@
|
||||
#ifndef _CRYPTO_TESTMGR_H
|
||||
#define _CRYPTO_TESTMGR_H
|
||||
|
||||
#include <linux/netlink.h>
|
||||
|
||||
#define MAX_DIGEST_SIZE 64
|
||||
#define MAX_TAP 8
|
||||
|
||||
@@ -10133,12 +10131,13 @@ static const struct cipher_testvec serpent_xts_tv_template[] = {
|
||||
};
|
||||
|
||||
/*
|
||||
* SM4 test vector taken from the draft RFC
|
||||
* https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
|
||||
* SM4 test vectors taken from the "The SM4 Blockcipher Algorithm And Its
|
||||
* Modes Of Operations" draft RFC
|
||||
* https://datatracker.ietf.org/doc/draft-ribose-cfrg-sm4
|
||||
*/
|
||||
|
||||
static const struct cipher_testvec sm4_tv_template[] = {
|
||||
{ /* SM4 Appendix A: Example Calculations. Example 1. */
|
||||
{ /* GB/T 32907-2016 Example 1. */
|
||||
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
|
||||
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
|
||||
.klen = 16,
|
||||
@@ -10147,10 +10146,7 @@ static const struct cipher_testvec sm4_tv_template[] = {
|
||||
.ctext = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
|
||||
"\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
|
||||
.len = 16,
|
||||
}, { /*
|
||||
* SM4 Appendix A: Example Calculations.
|
||||
* Last 10 iterations of Example 2.
|
||||
*/
|
||||
}, { /* Last 10 iterations of GB/T 32907-2016 Example 2. */
|
||||
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
|
||||
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
|
||||
.klen = 16,
|
||||
@@ -10195,744 +10191,116 @@ static const struct cipher_testvec sm4_tv_template[] = {
|
||||
"\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
|
||||
"\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
|
||||
.len = 160
|
||||
}, { /* A.2.1.1 SM4-ECB Example 1 */
|
||||
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
|
||||
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
|
||||
.klen = 16,
|
||||
.ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
|
||||
"\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
|
||||
"\xee\xee\xee\xee\xff\xff\xff\xff"
|
||||
"\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
|
||||
.ctext = "\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7"
|
||||
"\xb5\x17\x9f\x8f\x47\x4b\x86\x19"
|
||||
"\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9"
|
||||
"\x85\xf8\x1c\x84\x82\x19\x23\x04",
|
||||
.len = 32,
|
||||
}, { /* A.2.1.2 SM4-ECB Example 2 */
|
||||
.key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
|
||||
"\x01\x23\x45\x67\x89\xAB\xCD\xEF",
|
||||
.klen = 16,
|
||||
.ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
|
||||
"\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
|
||||
"\xee\xee\xee\xee\xff\xff\xff\xff"
|
||||
"\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
|
||||
.ctext = "\xC5\x87\x68\x97\xE4\xA5\x9B\xBB"
|
||||
"\xA7\x2A\x10\xC8\x38\x72\x24\x5B"
|
||||
"\x12\xDD\x90\xBC\x2D\x20\x06\x92"
|
||||
"\xB5\x29\xA4\x15\x5A\xC9\xE6\x00",
|
||||
.len = 32,
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Speck test vectors taken from the original paper:
|
||||
* "The Simon and Speck Families of Lightweight Block Ciphers"
|
||||
* https://eprint.iacr.org/2013/404.pdf
|
||||
*
|
||||
* Note that the paper does not make byte and word order clear. But it was
|
||||
* confirmed with the authors that the intended orders are little endian byte
|
||||
* order and (y, x) word order. Equivalently, the printed test vectors, when
|
||||
* looking at only the bytes (ignoring the whitespace that divides them into
|
||||
* words), are backwards: the left-most byte is actually the one with the
|
||||
* highest memory address, while the right-most byte is actually the one with
|
||||
* the lowest memory address.
|
||||
*/

static const struct cipher_testvec speck128_tv_template[] = {
    { /* Speck128/128 */
        .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
               "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
static const struct cipher_testvec sm4_cbc_tv_template[] = {
    { /* A.2.2.1 SM4-CBC Example 1 */
        .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
               "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
        .klen = 16,
        .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74"
                 "\x20\x65\x71\x75\x69\x76\x61\x6c",
        .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
                 "\x65\x32\x78\x79\x51\x98\x5d\xa6",
        .len = 16,
    }, { /* Speck128/192 */
        .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
               "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
               "\x10\x11\x12\x13\x14\x15\x16\x17",
        .klen = 24,
        .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
                 "\x68\x69\x65\x66\x20\x48\x61\x72",
        .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
                 "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
        .len = 16,
    }, { /* Speck128/256 */
        .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
               "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
               "\x10\x11\x12\x13\x14\x15\x16\x17"
               "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        .klen = 32,
        .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
                 "\x49\x6e\x20\x74\x68\x6f\x73\x65",
        .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
                 "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
        .len = 16,
    },
};

/*
 * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
 * ciphertext recomputed with Speck128 as the cipher
 */
static const struct cipher_testvec speck128_xts_tv_template[] = {
    {
        .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
               "\x00\x00\x00\x00\x00\x00\x00\x00"
               "\x00\x00\x00\x00\x00\x00\x00\x00"
               "\x00\x00\x00\x00\x00\x00\x00\x00",
        .klen = 32,
        .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
                 "\x3b\x99\x4a\x64\x74\x77\xac\xed"
                 "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
                 "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
        .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
                 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
                 "\xee\xee\xee\xee\xff\xff\xff\xff"
                 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
        .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
              "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
        .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48"
                 "\x31\x2A\xAE\xB2\x04\x02\x44\xCB"
                 "\x4C\xB7\x01\x69\x51\x90\x92\x26"
                 "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D",
        .len = 32,
    }, {
        .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
               "\x11\x11\x11\x11\x11\x11\x11\x11"
               "\x22\x22\x22\x22\x22\x22\x22\x22"
               "\x22\x22\x22\x22\x22\x22\x22\x22",
        .klen = 32,
        .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44",
        .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
                 "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
                 "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
                 "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
    }, { /* A.2.2.2 SM4-CBC Example 2 */
        .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
               "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
        .klen = 16,
        .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
                 "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
                 "\xee\xee\xee\xee\xff\xff\xff\xff"
                 "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
        .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
              "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
        .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98"
                 "\x85\x72\x15\x58\x7b\x7b\xb5\x9a"
                 "\x91\xf2\xc1\x47\x91\x1a\x41\x44"
                 "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38",
        .len = 32,
    }, {
        .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
               "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
               "\x22\x22\x22\x22\x22\x22\x22\x22"
               "\x22\x22\x22\x22\x22\x22\x22\x22",
        .klen = 32,
        .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44",
        .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
                 "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
                 "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
                 "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
        .len = 32,
    }, {
        .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
               "\x23\x53\x60\x28\x74\x71\x35\x26"
               "\x31\x41\x59\x26\x53\x58\x97\x93"
               "\x23\x84\x62\x64\x33\x83\x27\x95",
        .klen = 32,
        .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
                 "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
        .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
                 "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
                 "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
                 "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
                 "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
                 "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
                 "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
                 "\x19\xc5\x58\x84\x63\xb9\x12\x68"
                 "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
                 "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
                 "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
                 "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
                 "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
                 "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
                 "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
                 "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
                 "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
                 "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
                 "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
                 "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
                 "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
                 "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
                 "\xa5\x20\xac\x24\x1c\x73\x59\x73"
                 "\x58\x61\x3a\x87\x58\xb3\x20\x56"
                 "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
                 "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
                 "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
                 "\x09\x35\x71\x50\x65\xac\x92\xe3"
                 "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
                 "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
                 "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
                 "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
                 "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
                 "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
                 "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
                 "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
                 "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
                 "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
                 "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
                 "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
                 "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
                 "\x87\x30\xac\xd5\xea\x73\x49\x10"
                 "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
                 "\x66\x02\x35\x3d\x60\x06\x36\x4f"
                 "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
                 "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
                 "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
                 "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
                 "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
                 "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
                 "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
                 "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
                 "\x51\x66\x02\x69\x04\x97\x36\xd4"
                 "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
                 "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
                 "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
                 "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
                 "\x03\xe7\x05\x39\xf5\x05\x26\xee"
                 "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
                 "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
                 "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
                 "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
                 "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
                 "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
        .len = 512,
    }, {
        .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
               "\x23\x53\x60\x28\x74\x71\x35\x26"
               "\x62\x49\x77\x57\x24\x70\x93\x69"
               "\x99\x59\x57\x49\x66\x96\x76\x27"
               "\x31\x41\x59\x26\x53\x58\x97\x93"
               "\x23\x84\x62\x64\x33\x83\x27\x95"
               "\x02\x88\x41\x97\x16\x93\x99\x37"
               "\x51\x05\x82\x09\x74\x94\x45\x92",
        .klen = 64,
        .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
                 "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
        .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
                 "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
                 "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
                 "\x92\x99\xde\xd3\x76\xed\xcd\x63"
                 "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
                 "\x79\x36\x31\x19\x62\xae\x10\x7e"
                 "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
                 "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
                 "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
                 "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
                 "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
                 "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
                 "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
                 "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
                 "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
                 "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
                 "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
                 "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
                 "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
                 "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
                 "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
                 "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
                 "\xac\xa5\xd0\x38\xef\x19\x56\x53"
                 "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
                 "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
                 "\xed\x22\x34\x1c\x5d\xed\x17\x06"
                 "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
                 "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
                 "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
                 "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
                 "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
                 "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
                 "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
                 "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
                 "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
                 "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
                 "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
                 "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
                 "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
                 "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
                 "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
                 "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
                 "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
                 "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
                 "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
                 "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
                 "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
                 "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
                 "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
                 "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
                 "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
                 "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
                 "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
                 "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
                 "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
                 "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
                 "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
                 "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
                 "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
                 "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
                 "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
                 "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
                 "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
                 "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
        .len = 512,
        .also_non_np = 1,
        .np = 3,
        .tap = { 512 - 20, 4, 16 },
    }
};

static const struct cipher_testvec speck64_tv_template[] = {
    { /* Speck64/96 */
        .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
               "\x10\x11\x12\x13",
        .klen = 12,
        .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
        .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
        .len = 8,
    }, { /* Speck64/128 */
        .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
               "\x10\x11\x12\x13\x18\x19\x1a\x1b",
static const struct cipher_testvec sm4_ctr_tv_template[] = {
    { /* A.2.5.1 SM4-CTR Example 1 */
        .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
               "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
        .klen = 16,
        .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
        .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
        .len = 8,
    },
};
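
For SM4-CTR the rule is C_i = P_i XOR E_K(ctr_i), with the counter starting at the IV; only the XOR step differs from ECB, so short tails (like the 8-byte vector above) simply truncate the final keystream block. A generic sketch of that XOR step in plain C (illustrative names, not the kernel's implementation):

    #include <stddef.h>

    /* XOR one keystream block into the plaintext; len may be shorter than
     * the cipher block size for the final, truncated block. */
    static void ctr_xor_block(unsigned char *ctext, const unsigned char *ptext,
                              const unsigned char *keystream, size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            ctext[i] = ptext[i] ^ keystream[i];
    }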

/*
 * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
 * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted
 */
static const struct cipher_testvec speck64_xts_tv_template[] = {
    {
        .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
               "\x00\x00\x00\x00\x00\x00\x00\x00"
               "\x00\x00\x00\x00\x00\x00\x00\x00",
        .klen = 24,
        .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00"
                 "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
                 "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
                 "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
                 "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
        .len = 32,
    }, {
        .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
               "\x11\x11\x11\x11\x11\x11\x11\x11"
               "\x22\x22\x22\x22\x22\x22\x22\x22",
        .klen = 24,
        .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44",
        .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
                 "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
                 "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
                 "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
        .len = 32,
    }, {
        .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
               "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
               "\x22\x22\x22\x22\x22\x22\x22\x22",
        .klen = 24,
        .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44"
                 "\x44\x44\x44\x44\x44\x44\x44\x44",
        .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
                 "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
                 "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
                 "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
        .len = 32,
    }, {
        .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
               "\x23\x53\x60\x28\x74\x71\x35\x26"
               "\x31\x41\x59\x26\x53\x58\x97\x93",
        .klen = 24,
        .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
                 "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
        .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
                 "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
                 "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
                 "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
                 "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
                 "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
                 "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
                 "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
                 "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
                 "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
                 "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
                 "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
                 "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
                 "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
                 "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
                 "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
                 "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
                 "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
                 "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
                 "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
                 "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
                 "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
                 "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
                 "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
                 "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
                 "\x07\xff\xf3\x72\x74\x48\xb5\x40"
                 "\x50\xb5\xdd\x90\x43\x31\x18\x15"
                 "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
                 "\x29\x93\x90\x8b\xda\x07\xf0\x35"
                 "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
                 "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
                 "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
                 "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
                 "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
                 "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
                 "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
                 "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
                 "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
                 "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
                 "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
                 "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
                 "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
                 "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
                 "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
                 "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
                 "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
                 "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
                 "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
                 "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
                 "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
                 "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
                 "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
                 "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
                 "\x59\xda\xee\x1a\x22\x18\xda\x0d"
                 "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
                 "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
                 "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
                 "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
                 "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
                 "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
                 "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
                 "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
                 "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
                 "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
        .len = 512,
    }, {
        .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
               "\x23\x53\x60\x28\x74\x71\x35\x26"
               "\x62\x49\x77\x57\x24\x70\x93\x69"
               "\x99\x59\x57\x49\x66\x96\x76\x27",
        .klen = 32,
        .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
              "\x00\x00\x00\x00\x00\x00\x00\x00",
        .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
                 "\x00\x01\x02\x03\x04\x05\x06\x07"
                 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                 "\x10\x11\x12\x13\x14\x15\x16\x17"
                 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                 "\x20\x21\x22\x23\x24\x25\x26\x27"
                 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
                 "\x40\x41\x42\x43\x44\x45\x46\x47"
                 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
                 "\x50\x51\x52\x53\x54\x55\x56\x57"
                 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
                 "\x60\x61\x62\x63\x64\x65\x66\x67"
                 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
                 "\x70\x71\x72\x73\x74\x75\x76\x77"
                 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
                 "\x80\x81\x82\x83\x84\x85\x86\x87"
                 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
                 "\x90\x91\x92\x93\x94\x95\x96\x97"
                 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
                 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
                 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
                 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
                 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
                 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
                 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
                 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
                 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
                 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
                 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
        .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
                 "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
                 "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
                 "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
                 "\x78\x16\xea\x80\xdb\x33\x75\x94"
                 "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
                 "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
                 "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
                 "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
                 "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
                 "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
                 "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
                 "\x84\x5e\x46\xed\x20\x89\xb6\x44"
                 "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
                 "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
                 "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
                 "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
                 "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
                 "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
                 "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
                 "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
                 "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
                 "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
                 "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
                 "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
                 "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
                 "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
                 "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
                 "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
                 "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
                 "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
                 "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
                 "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
                 "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
                 "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
                 "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
                 "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
                 "\x52\x7f\x29\x51\x94\x20\x67\x3c"
                 "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
                 "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
                 "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
                 "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
                 "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
                 "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
                 "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
                 "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
                 "\x65\x53\x0f\x41\x11\xbd\x98\x99"
                 "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
                 "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
                 "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
                 "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
                 "\x63\x51\x34\x9d\x96\x12\xae\xce"
                 "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
                 "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
                 "\x54\x27\x74\xbb\x10\x86\x57\x46"
                 "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
                 "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
                 "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
                 "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
                 "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
                 "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
                 "\x9b\x63\x76\x32\x2f\x19\x72\x10"
                 "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
                 "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
        .len = 512,
        .also_non_np = 1,
        .np = 3,
        .tap = { 512 - 20, 4, 16 },
        .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
                 "\xee\xee\xee\xee\xee\xee\xee\xee"
                 "\xff\xff\xff\xff\xff\xff\xff\xff"
                 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
        .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
              "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
        .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07"
                 "\x91\x36\x4c\x39\x5a\x13\x42\xd1"
                 "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd"
                 "\x07\x4c\xce\x38\x5c\xdd\x70\xc7"
                 "\xf2\x34\xbc\x0e\x24\xc1\x19\x80"
                 "\xfd\x12\x86\x31\x0c\xe3\x7b\x92"
                 "\x6e\x02\xfc\xd0\xfa\xa0\xba\xf3"
                 "\x8b\x29\x33\x85\x1d\x82\x45\x14",
        .len = 64,
    }, { /* A.2.5.2 SM4-CTR Example 2 */
        .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
               "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
        .klen = 16,
        .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
                 "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
                 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
                 "\xee\xee\xee\xee\xee\xee\xee\xee"
                 "\xff\xff\xff\xff\xff\xff\xff\xff"
                 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
                 "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
        .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
              "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
        .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74"
                 "\x17\xa0\x85\x12\xee\x16\x0e\x2f"
                 "\x8f\x66\x15\x21\xcb\xba\xb4\x4c"
                 "\xc8\x71\x38\x44\x5b\xc2\x9e\x5c"
                 "\x0a\xe0\x29\x72\x05\xd6\x27\x04"
                 "\x17\x3b\x21\x23\x9b\x88\x7f\x6c"
                 "\x8c\xb5\xb8\x00\x91\x7a\x24\x88"
                 "\x28\x4b\xde\x9e\x16\xea\x29\x06",
        .len = 64,
    }
};
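
A note on the .also_non_np/.np/.tap triple carried by the 512-byte XTS vectors above: as far as I can tell from testmgr of this vintage, .np is the number of scatterlist pieces the data is split into, .tap holds their byte counts, and .also_non_np requests an extra run with the buffer left unsplit. A sketch of the scatterlist a tap of { 512 - 20, 4, 16 } describes (hypothetical buffer name, assuming the standard scatterlist API):

    #include <linux/scatterlist.h>

    static void tap_512_20_4_16(u8 *buf)
    {
        struct scatterlist sg[3];

        sg_init_table(sg, 3);
        sg_set_buf(&sg[0], buf, 512 - 20);  /* 492 bytes */
        sg_set_buf(&sg[1], buf + 492, 4);   /* forces a split mid-block */
        sg_set_buf(&sg[2], buf + 496, 16);  /* final 16 bytes */
    }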

@@ -13883,6 +13251,27 @@ static const struct cipher_testvec aes_lrw_tv_template[] = {
        .ctext = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
                 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
        .len = 16,
    }, { /* Test counter wrap-around, modified from LRW-32-AES 1 */
        .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
               "\x4c\x26\x84\x14\xb5\x68\x01\x85"
               "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
               "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
        .klen = 32,
        .iv = "\xff\xff\xff\xff\xff\xff\xff\xff"
              "\xff\xff\xff\xff\xff\xff\xff\xff",
        .ptext = "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x41\x42\x43\x44\x45\x46"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x41\x42\x43\x44\x45\x46"
                 "\x30\x31\x32\x33\x34\x35\x36\x37"
                 "\x38\x39\x41\x42\x43\x44\x45\x46",
        .ctext = "\x47\x90\x50\xf6\xf4\x8d\x5c\x7f"
                 "\x84\xc7\x83\x95\x2d\xa2\x02\xc0"
                 "\xda\x7f\xa3\xc0\x88\x2a\x0a\x50"
                 "\xfb\xc1\x78\x03\x39\xfe\x1d\xe5"
                 "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
                 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
        .len = 48,
    }, {
        /* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
        .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"

crypto/xcbc.c
@@ -57,15 +57,17 @@ struct xcbc_desc_ctx {
    u8 ctx[];
};

#define XCBC_BLOCKSIZE 16

static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
                                     const u8 *inkey, unsigned int keylen)
{
    unsigned long alignmask = crypto_shash_alignmask(parent);
    struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
    int bs = crypto_shash_blocksize(parent);
    u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
    int err = 0;
    u8 key1[bs];
    u8 key1[XCBC_BLOCKSIZE];
    int bs = sizeof(key1);

    if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
        return err;
@@ -212,7 +214,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        return PTR_ERR(alg);

    switch(alg->cra_blocksize) {
    case 16:
    case XCBC_BLOCKSIZE:
        break;
    default:
        goto out_put_alg;
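
The two hunks above are one instance of the VLA removal effort this merge mentions: a stack buffer sized by a runtime block size becomes a fixed-size array, with xcbc_create() already guaranteeing that only ciphers whose cra_blocksize equals XCBC_BLOCKSIZE can instantiate the template. The pattern in isolation (a compilable sketch, not the kernel code):

    #include <string.h>

    #define XCBC_BLOCKSIZE 16

    static void setkey_pattern(void)
    {
        /* Was "u8 key1[bs];" with bs read at runtime (a VLA); now the
         * bound is a compile-time constant and bs is derived from it. */
        unsigned char key1[XCBC_BLOCKSIZE];
        size_t bs = sizeof(key1);

        memset(key1, 0, bs);    /* stand-in for the real key derivation */
    }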

283
crypto/xts.c
@@ -26,8 +26,6 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define XTS_BUFFER_SIZE 128u

struct priv {
    struct crypto_skcipher *child;
    struct crypto_cipher *tweak;
@@ -39,19 +37,7 @@ struct xts_instance_ctx {
};

struct rctx {
    le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];

    le128 t;

    le128 *ext;

    struct scatterlist srcbuf[2];
    struct scatterlist dstbuf[2];
    struct scatterlist *src;
    struct scatterlist *dst;

    unsigned int left;

    struct skcipher_request subreq;
};

@@ -96,81 +82,27 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
    return err;
}

static int post_crypt(struct skcipher_request *req)
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    le128 *buf = rctx->ext ?: rctx->buf;
    struct skcipher_request *subreq;
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    const int bs = XTS_BLOCK_SIZE;
    struct skcipher_walk w;
    struct scatterlist *sg;
    unsigned offset;
    le128 t = rctx->t;
    int err;

    subreq = &rctx->subreq;
    err = skcipher_walk_virt(&w, subreq, false);

    while (w.nbytes) {
        unsigned int avail = w.nbytes;
        le128 *wdst;

        wdst = w.dst.virt.addr;

        do {
            le128_xor(wdst, buf++, wdst);
            wdst++;
        } while ((avail -= bs) >= bs);

        err = skcipher_walk_done(&w, avail);
    if (second_pass) {
        req = &rctx->subreq;
        /* set to our TFM to enforce correct alignment: */
        skcipher_request_set_tfm(req, tfm);
    }

    rctx->left -= subreq->cryptlen;

    if (err || !rctx->left)
        goto out;

    rctx->dst = rctx->dstbuf;

    scatterwalk_done(&w.out, 0, 1);
    sg = w.out.sg;
    offset = w.out.offset;

    if (rctx->dst != sg) {
        rctx->dst[0] = *sg;
        sg_unmark_end(rctx->dst);
        scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
    }
    rctx->dst[0].length -= offset - sg->offset;
    rctx->dst[0].offset = offset;

out:
    return err;
}

static int pre_crypt(struct skcipher_request *req)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    le128 *buf = rctx->ext ?: rctx->buf;
    struct skcipher_request *subreq;
    const int bs = XTS_BLOCK_SIZE;
    struct skcipher_walk w;
    struct scatterlist *sg;
    unsigned cryptlen;
    unsigned offset;
    bool more;
    int err;

    subreq = &rctx->subreq;
    cryptlen = subreq->cryptlen;

    more = rctx->left > cryptlen;
    if (!more)
        cryptlen = rctx->left;

    skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
                               cryptlen, NULL);

    err = skcipher_walk_virt(&w, subreq, false);
    err = skcipher_walk_virt(&w, req, false);

    while (w.nbytes) {
        unsigned int avail = w.nbytes;
@@ -181,180 +113,71 @@ static int pre_crypt(struct skcipher_request *req)
        wdst = w.dst.virt.addr;

        do {
            *buf++ = rctx->t;
            le128_xor(wdst++, &rctx->t, wsrc++);
            gf128mul_x_ble(&rctx->t, &rctx->t);
            le128_xor(wdst++, &t, wsrc++);
            gf128mul_x_ble(&t, &t);
        } while ((avail -= bs) >= bs);

        err = skcipher_walk_done(&w, avail);
    }

    skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
                               cryptlen, NULL);

    if (err || !more)
        goto out;

    rctx->src = rctx->srcbuf;

    scatterwalk_done(&w.in, 0, 1);
    sg = w.in.sg;
    offset = w.in.offset;

    if (rctx->src != sg) {
        rctx->src[0] = *sg;
        sg_unmark_end(rctx->src);
        scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
    }
    rctx->src[0].length -= offset - sg->offset;
    rctx->src[0].offset = offset;

out:
    return err;
}

static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
static int xor_tweak_pre(struct skcipher_request *req)
{
    return xor_tweak(req, false);
}

static int xor_tweak_post(struct skcipher_request *req)
{
    return xor_tweak(req, true);
}

static void crypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;

    if (!err)
        err = xor_tweak_post(req);

    skcipher_request_complete(req, err);
}

static void init_crypt(struct skcipher_request *req)
{
    struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;
    gfp_t gfp;
    struct skcipher_request *subreq = &rctx->subreq;

    subreq = &rctx->subreq;
    skcipher_request_set_tfm(subreq, ctx->child);
    skcipher_request_set_callback(subreq, req->base.flags, done, req);

    gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                       GFP_ATOMIC;
    rctx->ext = NULL;

    subreq->cryptlen = XTS_BUFFER_SIZE;
    if (req->cryptlen > XTS_BUFFER_SIZE) {
        unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

        rctx->ext = kmalloc(n, gfp);
        if (rctx->ext)
            subreq->cryptlen = n;
    }

    rctx->src = req->src;
    rctx->dst = req->dst;
    rctx->left = req->cryptlen;
    skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
    skcipher_request_set_crypt(subreq, req->dst, req->dst,
                               req->cryptlen, NULL);

    /* calculate first value of T */
    crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

    return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
    struct rctx *rctx = skcipher_request_ctx(req);

    rctx->left = 0;

    if (rctx->ext)
        kzfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;

    subreq = &rctx->subreq;

    while (!err && rctx->left) {
        err = pre_crypt(req) ?:
              crypto_skcipher_encrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS || err == -EBUSY)
            return err;
    }

    exit_crypt(req);
    return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;
    struct skcipher_request *subreq;
    struct rctx *rctx;

    rctx = skcipher_request_ctx(req);

    if (err == -EINPROGRESS) {
        if (rctx->left != req->cryptlen)
            return;
        goto out;
    }

    subreq = &rctx->subreq;
    subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

    err = do_encrypt(req, err ?: post_crypt(req));
    if (rctx->left)
        return;

out:
    skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
    return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq;
    struct skcipher_request *subreq = &rctx->subreq;

    subreq = &rctx->subreq;

    while (!err && rctx->left) {
        err = pre_crypt(req) ?:
              crypto_skcipher_decrypt(subreq) ?:
              post_crypt(req);

        if (err == -EINPROGRESS || err == -EBUSY)
            return err;
    }

    exit_crypt(req);
    return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
    struct skcipher_request *req = areq->data;
    struct skcipher_request *subreq;
    struct rctx *rctx;

    rctx = skcipher_request_ctx(req);

    if (err == -EINPROGRESS) {
        if (rctx->left != req->cryptlen)
            return;
        goto out;
    }

    subreq = &rctx->subreq;
    subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

    err = do_decrypt(req, err ?: post_crypt(req));
    if (rctx->left)
        return;

out:
    skcipher_request_complete(req, err);
    init_crypt(req);
    return xor_tweak_pre(req) ?:
           crypto_skcipher_encrypt(subreq) ?:
           xor_tweak_post(req);
}

static int decrypt(struct skcipher_request *req)
{
    return do_decrypt(req, init_crypt(req, decrypt_done));
    struct rctx *rctx = skcipher_request_ctx(req);
    struct skcipher_request *subreq = &rctx->subreq;

    init_crypt(req);
    return xor_tweak_pre(req) ?:
           crypto_skcipher_decrypt(subreq) ?:
           xor_tweak_post(req);
}
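
The restructured flow above is why XTS_BUFFER_SIZE and the kmalloc'd rctx->ext could go: instead of saving every tweak mask, xor_tweak() regenerates the tweak chain on its second pass, and the per-block update it repeats, gf128mul_x_ble(), is just multiplication by x in GF(2^128). Standalone, with my own names rather than the kernel's types, that update looks like this:

    #include <stdint.h>

    /* Advance an XTS tweak by one block: double it in GF(2^128) with the
     * reducing polynomial x^128 + x^7 + x^2 + x + 1 (0x87 folded back).
     * 'lo' holds bits 0..63 of the tweak, 'hi' bits 64..127. */
    static void tweak_next(uint64_t *hi, uint64_t *lo)
    {
        uint64_t carry = *hi >> 63;

        *hi = (*hi << 1) | (*lo >> 63);
        *lo = (*lo << 1) ^ (carry ? 0x87 : 0);
    }

Recomputing this chain costs a shift and a conditional XOR per block, which the comment above argues is usually cheaper than buffering the masks or making multiple ecb(..) calls.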
static int init_tfm(struct crypto_skcipher *tfm)