Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:

   - Decryption test vectors are now automatically generated from
     encryption test vectors.

  Algorithms:

   - Fix unaligned access issues in crc32/crc32c.

   - Add zstd compression algorithm.

   - Add AEGIS.

   - Add MORUS.

  Drivers:

   - Add accelerated AEGIS/MORUS on x86.

   - Add accelerated SM4 on arm64.

   - Remove the x86 assembly Salsa20 implementations, as they are slower
     than the generic C version.

   - Add authenc(hmac(sha*), cbc(aes)) support in inside-secure.

   - Add ctr(aes) support in crypto4xx.

   - Add hardware key support in ccree.

   - Add support for new Centaur CPU in via-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (112 commits)
  crypto: chtls - free beyond end rspq_skb_cache
  crypto: chtls - kbuild warnings
  crypto: chtls - dereference null variable
  crypto: chtls - wait for memory sendmsg, sendpage
  crypto: chtls - key len correction
  crypto: salsa20 - Revert "crypto: salsa20 - export generic helpers"
  crypto: x86/salsa20 - remove x86 salsa20 implementations
  crypto: ccp - Add GET_ID SEV command
  crypto: ccp - Add DOWNLOAD_FIRMWARE SEV command
  crypto: qat - Add MODULE_FIRMWARE for all qat drivers
  crypto: ccree - silence debug prints
  crypto: ccree - better clock handling
  crypto: ccree - correct host regs offset
  crypto: chelsio - Remove separate buffer used for DMA map B0 block in CCM
  crypt: chelsio - Send IV as Immediate for cipher algo
  crypto: chelsio - Return -ENOSPC for transient busy indication.
  crypto: caam/qi - fix warning in init_cgr()
  crypto: caam - fix rfc4543 descriptors
  crypto: caam - fix MC firmware detection
  crypto: clarify licensing of OpenSSL asm code
  ...
Merged by: Linus Torvalds
Date: 2018-06-05 15:51:21 -07:00
141 changed files with 20660 additions and 15353 deletions

View File

@@ -289,6 +289,107 @@ config CRYPTO_CHACHA20POLY1305
with the Poly1305 authenticator. It is defined in RFC7539 for use in
IETF protocols.
config CRYPTO_AEGIS128
tristate "AEGIS-128 AEAD algorithm"
select CRYPTO_AEAD
select CRYPTO_AES # for AES S-box tables
help
Support for the AEGIS-128 dedicated AEAD algorithm.
config CRYPTO_AEGIS128L
tristate "AEGIS-128L AEAD algorithm"
select CRYPTO_AEAD
select CRYPTO_AES # for AES S-box tables
help
Support for the AEGIS-128L dedicated AEAD algorithm.
config CRYPTO_AEGIS256
tristate "AEGIS-256 AEAD algorithm"
select CRYPTO_AEAD
select CRYPTO_AES # for AES S-box tables
help
Support for the AEGIS-256 dedicated AEAD algorithm.
config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
config CRYPTO_AEGIS128L_AESNI_SSE2
tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
config CRYPTO_AEGIS256_AESNI_SSE2
tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
config CRYPTO_MORUS640
tristate "MORUS-640 AEAD algorithm"
select CRYPTO_AEAD
help
Support for the MORUS-640 dedicated AEAD algorithm.
config CRYPTO_MORUS640_GLUE
tristate
depends on X86
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
algorithm.
config CRYPTO_MORUS640_SSE2
tristate "MORUS-640 AEAD algorithm (x86_64 SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_MORUS640_GLUE
help
SSE2 implementation of the MORUS-640 dedicated AEAD algorithm.
config CRYPTO_MORUS1280
tristate "MORUS-1280 AEAD algorithm"
select CRYPTO_AEAD
help
Support for the MORUS-1280 dedicated AEAD algorithm.
config CRYPTO_MORUS1280_GLUE
tristate
depends on X86
select CRYPTO_AEAD
select CRYPTO_CRYPTD
help
Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
algorithm.
config CRYPTO_MORUS1280_SSE2
tristate "MORUS-1280 AEAD algorithm (x86_64 SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_MORUS1280_GLUE
help
SSE2 optimized implementation of the MORUS-1280 dedicated AEAD
algorithm.
config CRYPTO_MORUS1280_AVX2
tristate "MORUS-1280 AEAD algorithm (x86_64 AVX2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_MORUS1280_GLUE
help
AVX2 optimized implementation of the MORUS-1280 dedicated AEAD
algorithm.
config CRYPTO_SEQIV
tristate "Sequence Number IV Generator"
select CRYPTO_AEAD
@@ -1335,34 +1436,6 @@ config CRYPTO_SALSA20
The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
config CRYPTO_CHACHA20
tristate "ChaCha20 cipher algorithm"
select CRYPTO_BLKCIPHER
@@ -1695,6 +1768,15 @@ config CRYPTO_LZ4HC
help
This is the LZ4 high compression mode algorithm.
config CRYPTO_ZSTD
tristate "Zstd compression algorithm"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
help
This is the zstd algorithm.
comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
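
For context, the new CRYPTO_ZSTD entry above exposes zstd through the acomp
(asynchronous compression) interface. The following is a minimal, hypothetical
sketch, not part of this patch set, of how a kernel caller might compress one
linear buffer with it; the function name and buffer handling are illustrative
only, while the crypto_acomp calls are the standard kernel API:

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int zstd_compress_demo(const void *src, unsigned int slen,
			      void *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist sg_src, sg_dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("zstd", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, (void *)src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Wait synchronously for the (possibly async) compression to finish. */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}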

View File

@@ -86,6 +86,11 @@ obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o
obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o
obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o
obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
@@ -137,6 +142,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o

crypto/aegis.h (new file, 80 lines)
View File

@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* AEGIS common definitions
*
* Copyright (c) 2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (c) 2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#ifndef _CRYPTO_AEGIS_H
#define _CRYPTO_AEGIS_H
#include <crypto/aes.h>
#include <linux/types.h>
#define AEGIS_BLOCK_SIZE 16
union aegis_block {
__le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)];
u8 bytes[AEGIS_BLOCK_SIZE];
};
#define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block))
#define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
static const union aegis_block crypto_aegis_const[2] = {
{ .words64 = {
cpu_to_le64(U64_C(0x0d08050302010100)),
cpu_to_le64(U64_C(0x6279e99059372215)),
} },
{ .words64 = {
cpu_to_le64(U64_C(0xf12fc26d55183ddb)),
cpu_to_le64(U64_C(0xdd28b57342311120)),
} },
};
static void crypto_aegis_block_xor(union aegis_block *dst,
const union aegis_block *src)
{
dst->words64[0] ^= src->words64[0];
dst->words64[1] ^= src->words64[1];
}
static void crypto_aegis_block_and(union aegis_block *dst,
const union aegis_block *src)
{
dst->words64[0] &= src->words64[0];
dst->words64[1] &= src->words64[1];
}
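/*
 * Single AES encryption round (SubBytes, ShiftRows, MixColumns, then XOR with
 * the round key), computed with the generic AES forward tables (crypto_ft_tab).
 */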
static void crypto_aegis_aesenc(union aegis_block *dst,
const union aegis_block *src,
const union aegis_block *key)
{
u32 *d = dst->words32;
const u8 *s = src->bytes;
const u32 *k = key->words32;
const u32 *t0 = crypto_ft_tab[0];
const u32 *t1 = crypto_ft_tab[1];
const u32 *t2 = crypto_ft_tab[2];
const u32 *t3 = crypto_ft_tab[3];
u32 d0, d1, d2, d3;
d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0];
d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1];
d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2];
d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3];
d[0] = d0;
d[1] = d1;
d[2] = d2;
d[3] = d3;
}
#endif /* _CRYPTO_AEGIS_H */

crypto/aegis128.c (new file, 463 lines)
View File

@@ -0,0 +1,463 @@
/*
* The AEGIS-128 Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include "aegis.h"
#define AEGIS128_NONCE_SIZE 16
#define AEGIS128_STATE_BLOCKS 5
#define AEGIS128_KEY_SIZE 16
#define AEGIS128_MIN_AUTH_SIZE 8
#define AEGIS128_MAX_AUTH_SIZE 16
struct aegis_state {
union aegis_block blocks[AEGIS128_STATE_BLOCKS];
};
struct aegis_ctx {
union aegis_block key;
};
struct aegis128_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
};
static void crypto_aegis128_update(struct aegis_state *state)
{
union aegis_block tmp;
unsigned int i;
tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];
for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--)
crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
&state->blocks[i]);
crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
}
static void crypto_aegis128_update_a(struct aegis_state *state,
const union aegis_block *msg)
{
crypto_aegis128_update(state);
crypto_aegis_block_xor(&state->blocks[0], msg);
}
static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg)
{
crypto_aegis128_update(state);
crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
}
static void crypto_aegis128_init(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
union aegis_block key_iv;
unsigned int i;
key_iv = *key;
crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE);
state->blocks[0] = key_iv;
state->blocks[1] = crypto_aegis_const[1];
state->blocks[2] = crypto_aegis_const[0];
state->blocks[3] = *key;
state->blocks[4] = *key;
crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]);
crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]);
for (i = 0; i < 5; i++) {
crypto_aegis128_update_a(state, key);
crypto_aegis128_update_a(state, &key_iv);
}
}
static void crypto_aegis128_ad(struct aegis_state *state,
const u8 *src, unsigned int size)
{
if (AEGIS_ALIGNED(src)) {
const union aegis_block *src_blk =
(const union aegis_block *)src;
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_a(state, src_blk);
size -= AEGIS_BLOCK_SIZE;
src_blk++;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_u(state, src);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
}
}
}
static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, src_blk);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_u(state, src);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis128_update_a(state, &msg);
crypto_aegis_block_xor(&msg, &tmp);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, &tmp);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_a(state, &tmp);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&msg, &tmp);
memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);
crypto_aegis128_update_a(state, &msg);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis128_process_ad(struct aegis_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
union aegis_block buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = AEGIS_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis128_update_a(state, &buf);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis128_ad(state, src, left);
src += left & ~(AEGIS_BLOCK_SIZE - 1);
left &= AEGIS_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
crypto_aegis128_update_a(state, &buf);
}
}
static void crypto_aegis128_process_crypt(struct aegis_state *state,
struct aead_request *req,
const struct aegis128_ops *ops)
{
struct skcipher_walk walk;
u8 *src, *dst;
unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
chunksize = walk.nbytes;
ops->crypt_chunk(state, dst, src, chunksize);
skcipher_walk_done(&walk, 0);
}
}
static void crypto_aegis128_final(struct aegis_state *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
union aegis_block tmp;
unsigned int i;
tmp.words64[0] = cpu_to_le64(assocbits);
tmp.words64[1] = cpu_to_le64(cryptbits);
crypto_aegis_block_xor(&tmp, &state->blocks[3]);
for (i = 0; i < 7; i++)
crypto_aegis128_update_a(state, &tmp);
for (i = 0; i < AEGIS128_STATE_BLOCKS; i++)
crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
}
static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct aegis_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != AEGIS128_KEY_SIZE) {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
return 0;
}
static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > AEGIS128_MAX_AUTH_SIZE)
return -EINVAL;
if (authsize < AEGIS128_MIN_AUTH_SIZE)
return -EINVAL;
return 0;
}
static void crypto_aegis128_crypt(struct aead_request *req,
union aegis_block *tag_xor,
unsigned int cryptlen,
const struct aegis128_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct aegis_state state;
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_process_crypt(&state, req, ops);
crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen);
}
static int crypto_aegis128_encrypt(struct aead_request *req)
{
static const struct aegis128_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_chunk = crypto_aegis128_encrypt_chunk,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_aegis128_crypt(req, &tag, cryptlen, &ops);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis128_decrypt(struct aead_request *req)
{
static const struct aegis128_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_chunk = crypto_aegis128_decrypt_chunk,
};
static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
crypto_aegis128_crypt(req, &tag, cryptlen, &ops);
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
static int crypto_aegis128_init_tfm(struct crypto_aead *tfm)
{
return 0;
}
static void crypto_aegis128_exit_tfm(struct crypto_aead *tfm)
{
}
static struct aead_alg crypto_aegis128_alg = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt,
.decrypt = crypto_aegis128_decrypt,
.init = crypto_aegis128_init_tfm,
.exit = crypto_aegis128_exit_tfm,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
.cra_priority = 100,
.cra_name = "aegis128",
.cra_driver_name = "aegis128-generic",
.cra_module = THIS_MODULE,
}
};
static int __init crypto_aegis128_module_init(void)
{
return crypto_register_aead(&crypto_aegis128_alg);
}
static void __exit crypto_aegis128_module_exit(void)
{
crypto_unregister_aead(&crypto_aegis128_alg);
}
module_init(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-generic");

crypto/aegis128l.c (new file, 527 lines)
View File

@@ -0,0 +1,527 @@
/*
* The AEGIS-128L Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include "aegis.h"
#define AEGIS128L_CHUNK_BLOCKS 2
#define AEGIS128L_CHUNK_SIZE (AEGIS128L_CHUNK_BLOCKS * AEGIS_BLOCK_SIZE)
#define AEGIS128L_NONCE_SIZE 16
#define AEGIS128L_STATE_BLOCKS 8
#define AEGIS128L_KEY_SIZE 16
#define AEGIS128L_MIN_AUTH_SIZE 8
#define AEGIS128L_MAX_AUTH_SIZE 16
union aegis_chunk {
union aegis_block blocks[AEGIS128L_CHUNK_BLOCKS];
u8 bytes[AEGIS128L_CHUNK_SIZE];
};
struct aegis_state {
union aegis_block blocks[AEGIS128L_STATE_BLOCKS];
};
struct aegis_ctx {
union aegis_block key;
};
struct aegis128l_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
};
static void crypto_aegis128l_update(struct aegis_state *state)
{
union aegis_block tmp;
unsigned int i;
tmp = state->blocks[AEGIS128L_STATE_BLOCKS - 1];
for (i = AEGIS128L_STATE_BLOCKS - 1; i > 0; i--)
crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
&state->blocks[i]);
crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
}
static void crypto_aegis128l_update_a(struct aegis_state *state,
const union aegis_chunk *msg)
{
crypto_aegis128l_update(state);
crypto_aegis_block_xor(&state->blocks[0], &msg->blocks[0]);
crypto_aegis_block_xor(&state->blocks[4], &msg->blocks[1]);
}
static void crypto_aegis128l_update_u(struct aegis_state *state,
const void *msg)
{
crypto_aegis128l_update(state);
crypto_xor(state->blocks[0].bytes, msg + 0 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
crypto_xor(state->blocks[4].bytes, msg + 1 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
}
static void crypto_aegis128l_init(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
union aegis_block key_iv;
union aegis_chunk chunk;
unsigned int i;
memcpy(chunk.blocks[0].bytes, iv, AEGIS_BLOCK_SIZE);
chunk.blocks[1] = *key;
key_iv = *key;
crypto_aegis_block_xor(&key_iv, &chunk.blocks[0]);
state->blocks[0] = key_iv;
state->blocks[1] = crypto_aegis_const[1];
state->blocks[2] = crypto_aegis_const[0];
state->blocks[3] = crypto_aegis_const[1];
state->blocks[4] = key_iv;
state->blocks[5] = *key;
state->blocks[6] = *key;
state->blocks[7] = *key;
crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[0]);
crypto_aegis_block_xor(&state->blocks[6], &crypto_aegis_const[1]);
crypto_aegis_block_xor(&state->blocks[7], &crypto_aegis_const[0]);
for (i = 0; i < 10; i++) {
crypto_aegis128l_update_a(state, &chunk);
}
}
static void crypto_aegis128l_ad(struct aegis_state *state,
const u8 *src, unsigned int size)
{
if (AEGIS_ALIGNED(src)) {
const union aegis_chunk *src_chunk =
(const union aegis_chunk *)src;
while (size >= AEGIS128L_CHUNK_SIZE) {
crypto_aegis128l_update_a(state, src_chunk);
size -= AEGIS128L_CHUNK_SIZE;
src_chunk += 1;
}
} else {
while (size >= AEGIS128L_CHUNK_SIZE) {
crypto_aegis128l_update_u(state, src);
size -= AEGIS128L_CHUNK_SIZE;
src += AEGIS128L_CHUNK_SIZE;
}
}
}
static void crypto_aegis128l_encrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_chunk tmp;
union aegis_block *tmp0 = &tmp.blocks[0];
union aegis_block *tmp1 = &tmp.blocks[1];
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS128L_CHUNK_SIZE) {
union aegis_chunk *dst_blk =
(union aegis_chunk *)dst;
const union aegis_chunk *src_blk =
(const union aegis_chunk *)src;
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]);
crypto_aegis128l_update_a(state, src_blk);
*dst_blk = tmp;
size -= AEGIS128L_CHUNK_SIZE;
src += AEGIS128L_CHUNK_SIZE;
dst += AEGIS128L_CHUNK_SIZE;
}
} else {
while (size >= AEGIS128L_CHUNK_SIZE) {
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
crypto_aegis128l_update_u(state, src);
memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE);
size -= AEGIS128L_CHUNK_SIZE;
src += AEGIS128L_CHUNK_SIZE;
dst += AEGIS128L_CHUNK_SIZE;
}
}
if (size > 0) {
union aegis_chunk msg = {};
memcpy(msg.bytes, src, size);
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_aegis128l_update_a(state, &msg);
crypto_aegis_block_xor(&msg.blocks[0], tmp0);
crypto_aegis_block_xor(&msg.blocks[1], tmp1);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis128l_decrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_chunk tmp;
union aegis_block *tmp0 = &tmp.blocks[0];
union aegis_block *tmp1 = &tmp.blocks[1];
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS128L_CHUNK_SIZE) {
union aegis_chunk *dst_blk =
(union aegis_chunk *)dst;
const union aegis_chunk *src_blk =
(const union aegis_chunk *)src;
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]);
crypto_aegis128l_update_a(state, &tmp);
*dst_blk = tmp;
size -= AEGIS128L_CHUNK_SIZE;
src += AEGIS128L_CHUNK_SIZE;
dst += AEGIS128L_CHUNK_SIZE;
}
} else {
while (size >= AEGIS128L_CHUNK_SIZE) {
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
crypto_aegis128l_update_a(state, &tmp);
memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE);
size -= AEGIS128L_CHUNK_SIZE;
src += AEGIS128L_CHUNK_SIZE;
dst += AEGIS128L_CHUNK_SIZE;
}
}
if (size > 0) {
union aegis_chunk msg = {};
memcpy(msg.bytes, src, size);
*tmp0 = state->blocks[2];
crypto_aegis_block_and(tmp0, &state->blocks[3]);
crypto_aegis_block_xor(tmp0, &state->blocks[6]);
crypto_aegis_block_xor(tmp0, &state->blocks[1]);
crypto_aegis_block_xor(&msg.blocks[0], tmp0);
*tmp1 = state->blocks[6];
crypto_aegis_block_and(tmp1, &state->blocks[7]);
crypto_aegis_block_xor(tmp1, &state->blocks[5]);
crypto_aegis_block_xor(tmp1, &state->blocks[2]);
crypto_aegis_block_xor(&msg.blocks[1], tmp1);
memset(msg.bytes + size, 0, AEGIS128L_CHUNK_SIZE - size);
crypto_aegis128l_update_a(state, &msg);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis128l_process_ad(struct aegis_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
union aegis_chunk buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS128L_CHUNK_SIZE) {
if (pos > 0) {
unsigned int fill = AEGIS128L_CHUNK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis128l_update_a(state, &buf);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis128l_ad(state, src, left);
src += left & ~(AEGIS128L_CHUNK_SIZE - 1);
left &= AEGIS128L_CHUNK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS128L_CHUNK_SIZE - pos);
crypto_aegis128l_update_a(state, &buf);
}
}
static void crypto_aegis128l_process_crypt(struct aegis_state *state,
struct aead_request *req,
const struct aegis128l_ops *ops)
{
struct skcipher_walk walk;
u8 *src, *dst;
unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
chunksize = walk.nbytes;
ops->crypt_chunk(state, dst, src, chunksize);
skcipher_walk_done(&walk, 0);
}
}
static void crypto_aegis128l_final(struct aegis_state *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
union aegis_chunk tmp;
unsigned int i;
tmp.blocks[0].words64[0] = cpu_to_le64(assocbits);
tmp.blocks[0].words64[1] = cpu_to_le64(cryptbits);
crypto_aegis_block_xor(&tmp.blocks[0], &state->blocks[2]);
tmp.blocks[1] = tmp.blocks[0];
for (i = 0; i < 7; i++)
crypto_aegis128l_update_a(state, &tmp);
for (i = 0; i < 7; i++)
crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
}
static int crypto_aegis128l_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct aegis_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != AEGIS128L_KEY_SIZE) {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE);
return 0;
}
static int crypto_aegis128l_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > AEGIS128L_MAX_AUTH_SIZE)
return -EINVAL;
if (authsize < AEGIS128L_MIN_AUTH_SIZE)
return -EINVAL;
return 0;
}
static void crypto_aegis128l_crypt(struct aead_request *req,
union aegis_block *tag_xor,
unsigned int cryptlen,
const struct aegis128l_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct aegis_state state;
crypto_aegis128l_init(&state, &ctx->key, req->iv);
crypto_aegis128l_process_ad(&state, req->src, req->assoclen);
crypto_aegis128l_process_crypt(&state, req, ops);
crypto_aegis128l_final(&state, tag_xor, req->assoclen, cryptlen);
}
static int crypto_aegis128l_encrypt(struct aead_request *req)
{
static const struct aegis128l_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_chunk = crypto_aegis128l_encrypt_chunk,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_aegis128l_crypt(req, &tag, cryptlen, &ops);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis128l_decrypt(struct aead_request *req)
{
static const struct aegis128l_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_chunk = crypto_aegis128l_decrypt_chunk,
};
static const u8 zeros[AEGIS128L_MAX_AUTH_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
crypto_aegis128l_crypt(req, &tag, cryptlen, &ops);
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
static int crypto_aegis128l_init_tfm(struct crypto_aead *tfm)
{
return 0;
}
static void crypto_aegis128l_exit_tfm(struct crypto_aead *tfm)
{
}
static struct aead_alg crypto_aegis128l_alg = {
.setkey = crypto_aegis128l_setkey,
.setauthsize = crypto_aegis128l_setauthsize,
.encrypt = crypto_aegis128l_encrypt,
.decrypt = crypto_aegis128l_decrypt,
.init = crypto_aegis128l_init_tfm,
.exit = crypto_aegis128l_exit_tfm,
.ivsize = AEGIS128L_NONCE_SIZE,
.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
.chunksize = AEGIS128L_CHUNK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
.cra_priority = 100,
.cra_name = "aegis128l",
.cra_driver_name = "aegis128l-generic",
.cra_module = THIS_MODULE,
}
};
static int __init crypto_aegis128l_module_init(void)
{
return crypto_register_aead(&crypto_aegis128l_alg);
}
static void __exit crypto_aegis128l_module_exit(void)
{
crypto_unregister_aead(&crypto_aegis128l_alg);
}
module_init(crypto_aegis128l_module_init);
module_exit(crypto_aegis128l_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis128l");
MODULE_ALIAS_CRYPTO("aegis128l-generic");

crypto/aegis256.c (new file, 478 lines)
View File

@@ -0,0 +1,478 @@
/*
* The AEGIS-256 Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include "aegis.h"
#define AEGIS256_NONCE_SIZE 32
#define AEGIS256_STATE_BLOCKS 6
#define AEGIS256_KEY_SIZE 32
#define AEGIS256_MIN_AUTH_SIZE 8
#define AEGIS256_MAX_AUTH_SIZE 16
struct aegis_state {
union aegis_block blocks[AEGIS256_STATE_BLOCKS];
};
struct aegis_ctx {
union aegis_block key[AEGIS256_KEY_SIZE / AEGIS_BLOCK_SIZE];
};
struct aegis256_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_chunk)(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size);
};
static void crypto_aegis256_update(struct aegis_state *state)
{
union aegis_block tmp;
unsigned int i;
tmp = state->blocks[AEGIS256_STATE_BLOCKS - 1];
for (i = AEGIS256_STATE_BLOCKS - 1; i > 0; i--)
crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
&state->blocks[i]);
crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
}
static void crypto_aegis256_update_a(struct aegis_state *state,
const union aegis_block *msg)
{
crypto_aegis256_update(state);
crypto_aegis_block_xor(&state->blocks[0], msg);
}
static void crypto_aegis256_update_u(struct aegis_state *state, const void *msg)
{
crypto_aegis256_update(state);
crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
}
static void crypto_aegis256_init(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
union aegis_block key_iv[2];
unsigned int i;
key_iv[0] = key[0];
key_iv[1] = key[1];
crypto_xor(key_iv[0].bytes, iv + 0 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
crypto_xor(key_iv[1].bytes, iv + 1 * AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
state->blocks[0] = key_iv[0];
state->blocks[1] = key_iv[1];
state->blocks[2] = crypto_aegis_const[1];
state->blocks[3] = crypto_aegis_const[0];
state->blocks[4] = key[0];
state->blocks[5] = key[1];
crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[0]);
crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[1]);
for (i = 0; i < 4; i++) {
crypto_aegis256_update_a(state, &key[0]);
crypto_aegis256_update_a(state, &key[1]);
crypto_aegis256_update_a(state, &key_iv[0]);
crypto_aegis256_update_a(state, &key_iv[1]);
}
}
static void crypto_aegis256_ad(struct aegis_state *state,
const u8 *src, unsigned int size)
{
if (AEGIS_ALIGNED(src)) {
const union aegis_block *src_blk =
(const union aegis_block *)src;
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis256_update_a(state, src_blk);
size -= AEGIS_BLOCK_SIZE;
src_blk++;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis256_update_u(state, src);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
}
}
}
static void crypto_aegis256_encrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis256_update_a(state, src_blk);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis256_update_u(state, src);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis256_update_a(state, &msg);
crypto_aegis_block_xor(&msg, &tmp);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis256_decrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis256_update_a(state, &tmp);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis256_update_a(state, &tmp);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[5]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&msg, &tmp);
memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);
crypto_aegis256_update_a(state, &msg);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis256_process_ad(struct aegis_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
union aegis_block buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = AEGIS_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis256_update_a(state, &buf);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis256_ad(state, src, left);
src += left & ~(AEGIS_BLOCK_SIZE - 1);
left &= AEGIS_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
crypto_aegis256_update_a(state, &buf);
}
}
static void crypto_aegis256_process_crypt(struct aegis_state *state,
struct aead_request *req,
const struct aegis256_ops *ops)
{
struct skcipher_walk walk;
u8 *src, *dst;
unsigned int chunksize;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
chunksize = walk.nbytes;
ops->crypt_chunk(state, dst, src, chunksize);
skcipher_walk_done(&walk, 0);
}
}
static void crypto_aegis256_final(struct aegis_state *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
union aegis_block tmp;
unsigned int i;
tmp.words64[0] = cpu_to_le64(assocbits);
tmp.words64[1] = cpu_to_le64(cryptbits);
crypto_aegis_block_xor(&tmp, &state->blocks[3]);
for (i = 0; i < 7; i++)
crypto_aegis256_update_a(state, &tmp);
for (i = 0; i < AEGIS256_STATE_BLOCKS; i++)
crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
}
static int crypto_aegis256_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct aegis_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != AEGIS256_KEY_SIZE) {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(ctx->key[0].bytes, key, AEGIS_BLOCK_SIZE);
memcpy(ctx->key[1].bytes, key + AEGIS_BLOCK_SIZE,
AEGIS_BLOCK_SIZE);
return 0;
}
static int crypto_aegis256_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > AEGIS256_MAX_AUTH_SIZE)
return -EINVAL;
if (authsize < AEGIS256_MIN_AUTH_SIZE)
return -EINVAL;
return 0;
}
static void crypto_aegis256_crypt(struct aead_request *req,
union aegis_block *tag_xor,
unsigned int cryptlen,
const struct aegis256_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct aegis_state state;
crypto_aegis256_init(&state, ctx->key, req->iv);
crypto_aegis256_process_ad(&state, req->src, req->assoclen);
crypto_aegis256_process_crypt(&state, req, ops);
crypto_aegis256_final(&state, tag_xor, req->assoclen, cryptlen);
}
static int crypto_aegis256_encrypt(struct aead_request *req)
{
static const struct aegis256_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_chunk = crypto_aegis256_encrypt_chunk,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_aegis256_crypt(req, &tag, cryptlen, &ops);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis256_decrypt(struct aead_request *req)
{
static const struct aegis256_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_chunk = crypto_aegis256_decrypt_chunk,
};
static const u8 zeros[AEGIS256_MAX_AUTH_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
crypto_aegis256_crypt(req, &tag, cryptlen, &ops);
return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
static int crypto_aegis256_init_tfm(struct crypto_aead *tfm)
{
return 0;
}
static void crypto_aegis256_exit_tfm(struct crypto_aead *tfm)
{
}
static struct aead_alg crypto_aegis256_alg = {
.setkey = crypto_aegis256_setkey,
.setauthsize = crypto_aegis256_setauthsize,
.encrypt = crypto_aegis256_encrypt,
.decrypt = crypto_aegis256_decrypt,
.init = crypto_aegis256_init_tfm,
.exit = crypto_aegis256_exit_tfm,
.ivsize = AEGIS256_NONCE_SIZE,
.maxauthsize = AEGIS256_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
.cra_priority = 100,
.cra_name = "aegis256",
.cra_driver_name = "aegis256-generic",
.cra_module = THIS_MODULE,
}
};
static int __init crypto_aegis256_module_init(void)
{
return crypto_register_aead(&crypto_aegis256_alg);
}
static void __exit crypto_aegis256_module_exit(void)
{
crypto_unregister_aead(&crypto_aegis256_alg);
}
module_init(crypto_aegis256_module_init);
module_exit(crypto_aegis256_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis256");
MODULE_ALIAS_CRYPTO("aegis256-generic");

View File

@@ -10,6 +10,7 @@
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fips.h>
@@ -59,6 +60,15 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_blocksize > PAGE_SIZE / 8)
return -EINVAL;
if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_CIPHER) {
if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
return -EINVAL;
if (alg->cra_blocksize > MAX_CIPHER_BLOCKSIZE)
return -EINVAL;
}
if (alg->cra_priority < 0)
return -EINVAL;

View File

@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
CRYPTO_TFM_RES_MASK);
out:
memzero_explicit(&keys, sizeof(keys));
return err;
badkey:

View File

@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
CRYPTO_TFM_RES_MASK);
out:
memzero_explicit(&keys, sizeof(keys));
return err;
badkey:

View File

@@ -53,9 +53,8 @@ static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
static void crypto_cfb_final(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned int bsize = crypto_cfb_bsize(tfm);
const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
u8 tmp[bsize + alignmask];
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
@@ -94,7 +93,7 @@ static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmp[bsize];
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
crypto_cfb_encrypt_one(tfm, iv, tmp);
@@ -164,7 +163,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmp[bsize];
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
crypto_cfb_encrypt_one(tfm, iv, tmp);

View File

@@ -13,6 +13,7 @@
*
*/
#include <crypto/algapi.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
@@ -67,7 +68,7 @@ static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
unsigned int size = crypto_tfm_alg_blocksize(tfm);
u8 buffer[size + alignmask];
u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(tmp, src, size);
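
The cfb and cipher hunks above, and the ctr and cts hunks below, all follow the
same pattern: variable-length stack arrays sized by the per-transform block
size and alignmask are replaced by worst-case buffers bounded by the new
MAX_CIPHER_BLOCKSIZE and MAX_CIPHER_ALIGNMASK constants, with PTR_ALIGN
selecting the properly aligned region inside them. A stand-alone sketch of the
idiom (illustrative only, assuming the constants added to <crypto/algapi.h> in
this series):

#include <crypto/algapi.h>	/* MAX_CIPHER_BLOCKSIZE, MAX_CIPHER_ALIGNMASK */
#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/string.h>

static void aligned_scratch_demo(unsigned long alignmask)
{
	/* Worst-case buffer replaces the old "u8 tmp[bsize + alignmask]" VLA. */
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *scratch = PTR_ALIGN(tmp, alignmask + 1);

	/* Up to one block of properly aligned scratch space. */
	memset(scratch, 0, MAX_CIPHER_BLOCKSIZE);
}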

View File

@@ -29,6 +29,7 @@
* This is crypto api shash wrappers to crc32_le.
*/
#include <asm/unaligned.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
@@ -39,11 +40,6 @@
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le(crc, p, len);
}
/** No default init with ~0 */
static int crc32_cra_init(struct crypto_tfm *tfm)
{
@@ -54,7 +50,6 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
return 0;
}
/*
* Setting the seed allows arbitrary accumulators and flexible XOR policy
* If your algorithm starts with ~0, then XOR with ~0 before you set
@@ -69,7 +64,7 @@ static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
*mctx = get_unaligned_le32(key);
return 0;
}
@@ -88,7 +83,7 @@ static int crc32_update(struct shash_desc *desc, const u8 *data,
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = __crc32_le(*crcp, data, len);
*crcp = crc32_le(*crcp, data, len);
return 0;
}
@@ -96,7 +91,7 @@ static int crc32_update(struct shash_desc *desc, const u8 *data,
static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
*(__le32 *)out = cpu_to_le32(__crc32_le(*crcp, data, len));
put_unaligned_le32(crc32_le(*crcp, data, len), out);
return 0;
}
@@ -110,7 +105,7 @@ static int crc32_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
*(__le32 *)out = cpu_to_le32p(crcp);
put_unaligned_le32(*crcp, out);
return 0;
}
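
The crc32 change above (and the crc32c one below) fixes the unaligned-access
issue called out in the pull summary: casting an arbitrary key or output
pointer to __le32 * is only safe when that pointer happens to be 4-byte
aligned, whereas the <asm/unaligned.h> helpers work for any address. A
stand-alone sketch of the two accessors (hypothetical helper names,
illustrative only):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Read a little-endian seed from a possibly unaligned key buffer. */
static u32 read_seed(const u8 *key)
{
	return get_unaligned_le32(key);		/* replaces le32_to_cpup((__le32 *)key) */
}

/* Write the digest to a possibly unaligned output buffer. */
static void write_digest(u32 crc, u8 *out)
{
	put_unaligned_le32(crc, out);		/* replaces *(__le32 *)out = cpu_to_le32(crc) */
}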

View File

@@ -35,6 +35,7 @@
*
*/
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -82,7 +83,7 @@ static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
mctx->key = le32_to_cpu(*(__le32 *)key);
mctx->key = get_unaligned_le32(key);
return 0;
}
@@ -99,13 +100,13 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
*(__le32 *)out = ~cpu_to_le32p(&ctx->crc);
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
*(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len));
put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
return 0;
}
@@ -148,7 +149,6 @@ static struct shash_alg alg = {
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32c_cra_init,

View File

@@ -58,7 +58,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
u8 *ctrblk = walk->iv;
u8 tmp[bsize + alignmask];
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
@@ -106,7 +106,7 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
unsigned int nbytes = walk->nbytes;
u8 *ctrblk = walk->iv;
u8 *src = walk->src.virt.addr;
u8 tmp[bsize + alignmask];
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {

View File

@@ -40,6 +40,7 @@
* rfc3962 includes errata information in its Appendix A.
*/
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -104,7 +105,7 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
u8 d[bsize * 2] __aligned(__alignof__(u32));
u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
struct scatterlist *sg;
unsigned int offset;
int lastn;
@@ -183,7 +184,7 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
u8 d[bsize * 2] __aligned(__alignof__(u32));
u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
struct scatterlist *sg;
unsigned int offset;
u8 *space;

View File

@@ -515,7 +515,7 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
static bool vli_mmod_fast(u64 *result, u64 *product,
const u64 *curve_prime, unsigned int ndigits)
{
u64 tmp[2 * ndigits];
u64 tmp[2 * ECC_MAX_DIGITS];
switch (ndigits) {
case 3:
@@ -536,7 +536,7 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
const u64 *curve_prime, unsigned int ndigits)
{
u64 product[2 * ndigits];
u64 product[2 * ECC_MAX_DIGITS];
vli_mult(product, left, right, ndigits);
vli_mmod_fast(result, product, curve_prime, ndigits);
@@ -546,7 +546,7 @@ static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
static void vli_mod_square_fast(u64 *result, const u64 *left,
const u64 *curve_prime, unsigned int ndigits)
{
u64 product[2 * ndigits];
u64 product[2 * ECC_MAX_DIGITS];
vli_square(product, left, ndigits);
vli_mmod_fast(result, product, curve_prime, ndigits);
@@ -560,8 +560,8 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
unsigned int ndigits)
{
u64 a[ndigits], b[ndigits];
u64 u[ndigits], v[ndigits];
u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
u64 u[ECC_MAX_DIGITS], v[ECC_MAX_DIGITS];
u64 carry;
int cmp_result;
@@ -649,8 +649,8 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
u64 *curve_prime, unsigned int ndigits)
{
/* t1 = x, t2 = y, t3 = z */
u64 t4[ndigits];
u64 t5[ndigits];
u64 t4[ECC_MAX_DIGITS];
u64 t5[ECC_MAX_DIGITS];
if (vli_is_zero(z1, ndigits))
return;
@@ -711,7 +711,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
unsigned int ndigits)
{
u64 t1[ndigits];
u64 t1[ECC_MAX_DIGITS];
vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */
vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
@@ -724,7 +724,7 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
u64 *p_initial_z, u64 *curve_prime,
unsigned int ndigits)
{
u64 z[ndigits];
u64 z[ECC_MAX_DIGITS];
vli_set(x2, x1, ndigits);
vli_set(y2, y1, ndigits);
@@ -750,7 +750,7 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
unsigned int ndigits)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ndigits];
u64 t5[ECC_MAX_DIGITS];
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
@@ -791,9 +791,9 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
unsigned int ndigits)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ndigits];
u64 t6[ndigits];
u64 t7[ndigits];
u64 t5[ECC_MAX_DIGITS];
u64 t6[ECC_MAX_DIGITS];
u64 t7[ECC_MAX_DIGITS];
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
@@ -846,9 +846,9 @@ static void ecc_point_mult(struct ecc_point *result,
unsigned int ndigits)
{
/* R0 and R1 */
u64 rx[2][ndigits];
u64 ry[2][ndigits];
u64 z[ndigits];
u64 rx[2][ECC_MAX_DIGITS];
u64 ry[2][ECC_MAX_DIGITS];
u64 z[ECC_MAX_DIGITS];
int i, nb;
int num_bits = vli_num_bits(scalar, ndigits);
@@ -943,13 +943,13 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
const struct ecc_curve *curve = ecc_get_curve(curve_id);
u64 priv[ndigits];
u64 priv[ECC_MAX_DIGITS];
unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
unsigned int nbits = vli_num_bits(curve->n, ndigits);
int err;
/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
if (nbits < 160)
if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
return -EINVAL;
/*
@@ -988,10 +988,10 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *pk;
u64 priv[ndigits];
u64 priv[ECC_MAX_DIGITS];
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !curve) {
if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) {
ret = -EINVAL;
goto out;
}
@@ -1025,30 +1025,25 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *product, *pk;
u64 *priv, *rand_z;
u64 priv[ECC_MAX_DIGITS];
u64 rand_z[ECC_MAX_DIGITS];
unsigned int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !public_key || !curve) {
if (!private_key || !public_key || !curve ||
ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) {
ret = -EINVAL;
goto out;
}
priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto out;
}
nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
if (!rand_z) {
ret = -ENOMEM;
goto kfree_out;
}
get_random_bytes(rand_z, nbytes);
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
goto kfree_out;
goto out;
}
product = ecc_alloc_point(ndigits);
@@ -1057,8 +1052,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
@@ -1073,9 +1066,6 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
kfree_out:
kzfree(priv);
kzfree(rand_z);
out:
return ret;
}
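The ecc.c changes above all apply one pattern: variable-length arrays sized by ndigits, and the kmalloc()ed scratch buffers in crypto_ecdh_shared_secret(), become fixed-size stack buffers bounded by ECC_MAX_DIGITS, while the entry points reject any ndigits larger than that bound. A minimal stand-alone C sketch of the pattern, with illustrative names only (not kernel API):

/*
 * Hypothetical illustration of the VLA-removal pattern used in ecc.c:
 * size the scratch buffer by the compile-time maximum and reject any
 * caller that asks for more digits than that.
 */
#include <errno.h>
#include <stdint.h>

#define ECC_MAX_DIGITS	4	/* largest supported curve: NIST P-256 */

static int demo_mod_mult(uint64_t *result, const uint64_t *left,
			 const uint64_t *right, unsigned int ndigits)
{
	uint64_t product[2 * ECC_MAX_DIGITS];	/* was: u64 product[2 * ndigits] */

	if (ndigits > ECC_MAX_DIGITS)		/* bound check replaces VLA sizing */
		return -EINVAL;

	/*
	 * Real code would multiply and reduce here, touching only
	 * product[0 .. 2 * ndigits - 1].
	 */
	(void)result; (void)left; (void)right; (void)product;
	return 0;
}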

View File

@@ -26,7 +26,9 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
#define ECC_MAX_DIGITS 4 /* 256 */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
#define ECC_MAX_DIGITS ECC_CURVE_NIST_P256_DIGITS
#define ECC_DIGITS_TO_BYTES_SHIFT 3
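For reference, ECC_DIGITS_TO_BYTES_SHIFT of 3 means one digit is a u64, so nbytes = ndigits << 3: NIST P-192 works on 3 digits (24 bytes) and NIST P-256 on 4 digits (32 bytes), which is why ECC_MAX_DIGITS is tied to the P-256 digit count.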

View File

@@ -30,8 +30,8 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
static unsigned int ecdh_supported_curve(unsigned int curve_id)
{
switch (curve_id) {
case ECC_CURVE_NIST_P192: return 3;
case ECC_CURVE_NIST_P256: return 4;
case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS;
case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS;
default: return 0;
}
}

549
crypto/morus1280.c Normal file
View File

@@ -0,0 +1,549 @@
/*
* The MORUS-1280 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus_common.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#define MORUS1280_WORD_SIZE 8
#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE)
#define MORUS1280_BLOCK_ALIGN (__alignof__(__le64))
#define MORUS1280_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS1280_BLOCK_ALIGN)
struct morus1280_block {
u64 words[MORUS_BLOCK_WORDS];
};
union morus1280_block_in {
__le64 words[MORUS_BLOCK_WORDS];
u8 bytes[MORUS1280_BLOCK_SIZE];
};
struct morus1280_state {
struct morus1280_block s[MORUS_STATE_BLOCKS];
};
struct morus1280_ctx {
struct morus1280_block key;
};
struct morus1280_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_chunk)(struct morus1280_state *state,
u8 *dst, const u8 *src, unsigned int size);
};
static const struct morus1280_block crypto_morus1280_const[1] = {
{ .words = {
U64_C(0x0d08050302010100),
U64_C(0x6279e99059372215),
U64_C(0xf12fc26d55183ddb),
U64_C(0xdd28b57342311120),
} },
};
static void crypto_morus1280_round(struct morus1280_block *b0,
struct morus1280_block *b1,
struct morus1280_block *b2,
struct morus1280_block *b3,
struct morus1280_block *b4,
const struct morus1280_block *m,
unsigned int b, unsigned int w)
{
unsigned int i;
struct morus1280_block tmp;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
b0->words[i] ^= b1->words[i] & b2->words[i];
b0->words[i] ^= b3->words[i];
b0->words[i] ^= m->words[i];
b0->words[i] = rol64(b0->words[i], b);
}
tmp = *b3;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i];
}
static void crypto_morus1280_update(struct morus1280_state *state,
const struct morus1280_block *m)
{
static const struct morus1280_block z = {};
struct morus1280_block *s = state->s;
crypto_morus1280_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 13, 1);
crypto_morus1280_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 46, 2);
crypto_morus1280_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 38, 3);
crypto_morus1280_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 7, 2);
crypto_morus1280_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 4, 1);
}
static void crypto_morus1280_load_a(struct morus1280_block *dst, const u8 *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
dst->words[i] = le64_to_cpu(*(const __le64 *)src);
src += MORUS1280_WORD_SIZE;
}
}
static void crypto_morus1280_load_u(struct morus1280_block *dst, const u8 *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
dst->words[i] = get_unaligned_le64(src);
src += MORUS1280_WORD_SIZE;
}
}
static void crypto_morus1280_load(struct morus1280_block *dst, const u8 *src)
{
if (MORUS1280_ALIGNED(src))
crypto_morus1280_load_a(dst, src);
else
crypto_morus1280_load_u(dst, src);
}
static void crypto_morus1280_store_a(u8 *dst, const struct morus1280_block *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
*(__le64 *)dst = cpu_to_le64(src->words[i]);
dst += MORUS1280_WORD_SIZE;
}
}
static void crypto_morus1280_store_u(u8 *dst, const struct morus1280_block *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
put_unaligned_le64(src->words[i], dst);
dst += MORUS1280_WORD_SIZE;
}
}
static void crypto_morus1280_store(u8 *dst, const struct morus1280_block *src)
{
if (MORUS1280_ALIGNED(dst))
crypto_morus1280_store_a(dst, src);
else
crypto_morus1280_store_u(dst, src);
}
static void crypto_morus1280_ad(struct morus1280_state *state, const u8 *src,
unsigned int size)
{
struct morus1280_block m;
if (MORUS1280_ALIGNED(src)) {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_a(&m, src);
crypto_morus1280_update(state, &m);
size -= MORUS1280_BLOCK_SIZE;
src += MORUS1280_BLOCK_SIZE;
}
} else {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_u(&m, src);
crypto_morus1280_update(state, &m);
size -= MORUS1280_BLOCK_SIZE;
src += MORUS1280_BLOCK_SIZE;
}
}
}
static void crypto_morus1280_core(const struct morus1280_state *state,
struct morus1280_block *blk)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i];
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
blk->words[i] ^= state->s[0].words[i];
blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i];
}
}
static void crypto_morus1280_encrypt_chunk(struct morus1280_state *state,
u8 *dst, const u8 *src,
unsigned int size)
{
struct morus1280_block c, m;
if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_a(&m, src);
c = m;
crypto_morus1280_core(state, &c);
crypto_morus1280_store_a(dst, &c);
crypto_morus1280_update(state, &m);
src += MORUS1280_BLOCK_SIZE;
dst += MORUS1280_BLOCK_SIZE;
size -= MORUS1280_BLOCK_SIZE;
}
} else {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_u(&m, src);
c = m;
crypto_morus1280_core(state, &c);
crypto_morus1280_store_u(dst, &c);
crypto_morus1280_update(state, &m);
src += MORUS1280_BLOCK_SIZE;
dst += MORUS1280_BLOCK_SIZE;
size -= MORUS1280_BLOCK_SIZE;
}
}
if (size > 0) {
union morus1280_block_in tail;
memcpy(tail.bytes, src, size);
memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
crypto_morus1280_load_a(&m, tail.bytes);
c = m;
crypto_morus1280_core(state, &c);
crypto_morus1280_store_a(tail.bytes, &c);
crypto_morus1280_update(state, &m);
memcpy(dst, tail.bytes, size);
}
}
static void crypto_morus1280_decrypt_chunk(struct morus1280_state *state,
u8 *dst, const u8 *src,
unsigned int size)
{
struct morus1280_block m;
if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_a(&m, src);
crypto_morus1280_core(state, &m);
crypto_morus1280_store_a(dst, &m);
crypto_morus1280_update(state, &m);
src += MORUS1280_BLOCK_SIZE;
dst += MORUS1280_BLOCK_SIZE;
size -= MORUS1280_BLOCK_SIZE;
}
} else {
while (size >= MORUS1280_BLOCK_SIZE) {
crypto_morus1280_load_u(&m, src);
crypto_morus1280_core(state, &m);
crypto_morus1280_store_u(dst, &m);
crypto_morus1280_update(state, &m);
src += MORUS1280_BLOCK_SIZE;
dst += MORUS1280_BLOCK_SIZE;
size -= MORUS1280_BLOCK_SIZE;
}
}
if (size > 0) {
union morus1280_block_in tail;
memcpy(tail.bytes, src, size);
memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
crypto_morus1280_load_a(&m, tail.bytes);
crypto_morus1280_core(state, &m);
crypto_morus1280_store_a(tail.bytes, &m);
memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size);
crypto_morus1280_load_a(&m, tail.bytes);
crypto_morus1280_update(state, &m);
memcpy(dst, tail.bytes, size);
}
}
static void crypto_morus1280_init(struct morus1280_state *state,
const struct morus1280_block *key,
const u8 *iv)
{
static const struct morus1280_block z = {};
union morus1280_block_in tmp;
unsigned int i;
memcpy(tmp.bytes, iv, MORUS_NONCE_SIZE);
memset(tmp.bytes + MORUS_NONCE_SIZE, 0,
MORUS1280_BLOCK_SIZE - MORUS_NONCE_SIZE);
crypto_morus1280_load(&state->s[0], tmp.bytes);
state->s[1] = *key;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[2].words[i] = U64_C(0xFFFFFFFFFFFFFFFF);
state->s[3] = z;
state->s[4] = crypto_morus1280_const[0];
for (i = 0; i < 16; i++)
crypto_morus1280_update(state, &z);
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[1].words[i] ^= key->words[i];
}
static void crypto_morus1280_process_ad(struct morus1280_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
struct morus1280_block m;
union morus1280_block_in buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= MORUS1280_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_morus1280_load_a(&m, buf.bytes);
crypto_morus1280_update(state, &m);
pos = 0;
left -= fill;
src += fill;
}
crypto_morus1280_ad(state, src, left);
src += left & ~(MORUS1280_BLOCK_SIZE - 1);
left &= MORUS1280_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
crypto_morus1280_load_a(&m, buf.bytes);
crypto_morus1280_update(state, &m);
}
}
static void crypto_morus1280_process_crypt(struct morus1280_state *state,
struct aead_request *req,
const struct morus1280_ops *ops)
{
struct skcipher_walk walk;
u8 *dst;
const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
ops->crypt_chunk(state, dst, src, walk.nbytes);
skcipher_walk_done(&walk, 0);
}
}
static void crypto_morus1280_final(struct morus1280_state *state,
struct morus1280_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
struct morus1280_block tmp;
unsigned int i;
tmp.words[0] = cpu_to_le64(assocbits);
tmp.words[1] = cpu_to_le64(cryptbits);
tmp.words[2] = 0;
tmp.words[3] = 0;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[4].words[i] ^= state->s[0].words[i];
for (i = 0; i < 10; i++)
crypto_morus1280_update(state, &tmp);
crypto_morus1280_core(state, tag_xor);
}
static int crypto_morus1280_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
union morus1280_block_in tmp;
if (keylen == MORUS1280_BLOCK_SIZE)
crypto_morus1280_load(&ctx->key, key);
else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
memcpy(tmp.bytes, key, keylen);
memcpy(tmp.bytes + keylen, key, keylen);
crypto_morus1280_load(&ctx->key, tmp.bytes);
} else {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
}
static int crypto_morus1280_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
static void crypto_morus1280_crypt(struct aead_request *req,
struct morus1280_block *tag_xor,
unsigned int cryptlen,
const struct morus1280_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
struct morus1280_state state;
crypto_morus1280_init(&state, &ctx->key, req->iv);
crypto_morus1280_process_ad(&state, req->src, req->assoclen);
crypto_morus1280_process_crypt(&state, req, ops);
crypto_morus1280_final(&state, tag_xor, req->assoclen, cryptlen);
}
static int crypto_morus1280_encrypt(struct aead_request *req)
{
static const struct morus1280_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_chunk = crypto_morus1280_encrypt_chunk,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus1280_block tag = {};
union morus1280_block_in tag_out;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_morus1280_crypt(req, &tag, cryptlen, &ops);
crypto_morus1280_store(tag_out.bytes, &tag);
scatterwalk_map_and_copy(tag_out.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
return 0;
}
static int crypto_morus1280_decrypt(struct aead_request *req)
{
static const struct morus1280_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_chunk = crypto_morus1280_decrypt_chunk,
};
static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union morus1280_block_in tag_in;
struct morus1280_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag_in.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
crypto_morus1280_load(&tag, tag_in.bytes);
crypto_morus1280_crypt(req, &tag, cryptlen, &ops);
crypto_morus1280_store(tag_in.bytes, &tag);
return crypto_memneq(tag_in.bytes, zeros, authsize) ? -EBADMSG : 0;
}
static int crypto_morus1280_init_tfm(struct crypto_aead *tfm)
{
return 0;
}
static void crypto_morus1280_exit_tfm(struct crypto_aead *tfm)
{
}
static struct aead_alg crypto_morus1280_alg = {
.setkey = crypto_morus1280_setkey,
.setauthsize = crypto_morus1280_setauthsize,
.encrypt = crypto_morus1280_encrypt,
.decrypt = crypto_morus1280_decrypt,
.init = crypto_morus1280_init_tfm,
.exit = crypto_morus1280_exit_tfm,
.ivsize = MORUS_NONCE_SIZE,
.maxauthsize = MORUS_MAX_AUTH_SIZE,
.chunksize = MORUS1280_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus1280_ctx),
.cra_alignmask = 0,
.cra_priority = 100,
.cra_name = "morus1280",
.cra_driver_name = "morus1280-generic",
.cra_module = THIS_MODULE,
}
};
static int __init crypto_morus1280_module_init(void)
{
return crypto_register_aead(&crypto_morus1280_alg);
}
static void __exit crypto_morus1280_module_exit(void)
{
crypto_unregister_aead(&crypto_morus1280_alg);
}
module_init(crypto_morus1280_module_init);
module_exit(crypto_morus1280_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm");
MODULE_ALIAS_CRYPTO("morus1280");
MODULE_ALIAS_CRYPTO("morus1280-generic");

544
crypto/morus640.c Normal file
View File

@@ -0,0 +1,544 @@
/*
* The MORUS-640 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus_common.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#define MORUS640_WORD_SIZE 4
#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE)
#define MORUS640_BLOCK_ALIGN (__alignof__(__le32))
#define MORUS640_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS640_BLOCK_ALIGN)
struct morus640_block {
u32 words[MORUS_BLOCK_WORDS];
};
union morus640_block_in {
__le32 words[MORUS_BLOCK_WORDS];
u8 bytes[MORUS640_BLOCK_SIZE];
};
struct morus640_state {
struct morus640_block s[MORUS_STATE_BLOCKS];
};
struct morus640_ctx {
struct morus640_block key;
};
struct morus640_ops {
int (*skcipher_walk_init)(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
void (*crypt_chunk)(struct morus640_state *state,
u8 *dst, const u8 *src, unsigned int size);
};
static const struct morus640_block crypto_morus640_const[2] = {
{ .words = {
U32_C(0x02010100),
U32_C(0x0d080503),
U32_C(0x59372215),
U32_C(0x6279e990),
} },
{ .words = {
U32_C(0x55183ddb),
U32_C(0xf12fc26d),
U32_C(0x42311120),
U32_C(0xdd28b573),
} },
};
static void crypto_morus640_round(struct morus640_block *b0,
struct morus640_block *b1,
struct morus640_block *b2,
struct morus640_block *b3,
struct morus640_block *b4,
const struct morus640_block *m,
unsigned int b, unsigned int w)
{
unsigned int i;
struct morus640_block tmp;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
b0->words[i] ^= b1->words[i] & b2->words[i];
b0->words[i] ^= b3->words[i];
b0->words[i] ^= m->words[i];
b0->words[i] = rol32(b0->words[i], b);
}
tmp = *b3;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i];
}
static void crypto_morus640_update(struct morus640_state *state,
const struct morus640_block *m)
{
static const struct morus640_block z = {};
struct morus640_block *s = state->s;
crypto_morus640_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 5, 1);
crypto_morus640_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 31, 2);
crypto_morus640_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 7, 3);
crypto_morus640_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 22, 2);
crypto_morus640_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 13, 1);
}
static void crypto_morus640_load_a(struct morus640_block *dst, const u8 *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
dst->words[i] = le32_to_cpu(*(const __le32 *)src);
src += MORUS640_WORD_SIZE;
}
}
static void crypto_morus640_load_u(struct morus640_block *dst, const u8 *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
dst->words[i] = get_unaligned_le32(src);
src += MORUS640_WORD_SIZE;
}
}
static void crypto_morus640_load(struct morus640_block *dst, const u8 *src)
{
if (MORUS640_ALIGNED(src))
crypto_morus640_load_a(dst, src);
else
crypto_morus640_load_u(dst, src);
}
static void crypto_morus640_store_a(u8 *dst, const struct morus640_block *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
*(__le32 *)dst = cpu_to_le32(src->words[i]);
dst += MORUS640_WORD_SIZE;
}
}
static void crypto_morus640_store_u(u8 *dst, const struct morus640_block *src)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
put_unaligned_le32(src->words[i], dst);
dst += MORUS640_WORD_SIZE;
}
}
static void crypto_morus640_store(u8 *dst, const struct morus640_block *src)
{
if (MORUS640_ALIGNED(dst))
crypto_morus640_store_a(dst, src);
else
crypto_morus640_store_u(dst, src);
}
static void crypto_morus640_ad(struct morus640_state *state, const u8 *src,
unsigned int size)
{
struct morus640_block m;
if (MORUS640_ALIGNED(src)) {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_a(&m, src);
crypto_morus640_update(state, &m);
size -= MORUS640_BLOCK_SIZE;
src += MORUS640_BLOCK_SIZE;
}
} else {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_u(&m, src);
crypto_morus640_update(state, &m);
size -= MORUS640_BLOCK_SIZE;
src += MORUS640_BLOCK_SIZE;
}
}
}
static void crypto_morus640_core(const struct morus640_state *state,
struct morus640_block *blk)
{
unsigned int i;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i];
for (i = 0; i < MORUS_BLOCK_WORDS; i++) {
blk->words[i] ^= state->s[0].words[i];
blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i];
}
}
static void crypto_morus640_encrypt_chunk(struct morus640_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
struct morus640_block c, m;
if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_a(&m, src);
c = m;
crypto_morus640_core(state, &c);
crypto_morus640_store_a(dst, &c);
crypto_morus640_update(state, &m);
src += MORUS640_BLOCK_SIZE;
dst += MORUS640_BLOCK_SIZE;
size -= MORUS640_BLOCK_SIZE;
}
} else {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_u(&m, src);
c = m;
crypto_morus640_core(state, &c);
crypto_morus640_store_u(dst, &c);
crypto_morus640_update(state, &m);
src += MORUS640_BLOCK_SIZE;
dst += MORUS640_BLOCK_SIZE;
size -= MORUS640_BLOCK_SIZE;
}
}
if (size > 0) {
union morus640_block_in tail;
memcpy(tail.bytes, src, size);
memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
crypto_morus640_load_a(&m, tail.bytes);
c = m;
crypto_morus640_core(state, &c);
crypto_morus640_store_a(tail.bytes, &c);
crypto_morus640_update(state, &m);
memcpy(dst, tail.bytes, size);
}
}
static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
struct morus640_block m;
if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_a(&m, src);
crypto_morus640_core(state, &m);
crypto_morus640_store_a(dst, &m);
crypto_morus640_update(state, &m);
src += MORUS640_BLOCK_SIZE;
dst += MORUS640_BLOCK_SIZE;
size -= MORUS640_BLOCK_SIZE;
}
} else {
while (size >= MORUS640_BLOCK_SIZE) {
crypto_morus640_load_u(&m, src);
crypto_morus640_core(state, &m);
crypto_morus640_store_u(dst, &m);
crypto_morus640_update(state, &m);
src += MORUS640_BLOCK_SIZE;
dst += MORUS640_BLOCK_SIZE;
size -= MORUS640_BLOCK_SIZE;
}
}
if (size > 0) {
union morus640_block_in tail;
memcpy(tail.bytes, src, size);
memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
crypto_morus640_load_a(&m, tail.bytes);
crypto_morus640_core(state, &m);
crypto_morus640_store_a(tail.bytes, &m);
memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
crypto_morus640_load_a(&m, tail.bytes);
crypto_morus640_update(state, &m);
memcpy(dst, tail.bytes, size);
}
}
static void crypto_morus640_init(struct morus640_state *state,
const struct morus640_block *key,
const u8 *iv)
{
static const struct morus640_block z = {};
unsigned int i;
crypto_morus640_load(&state->s[0], iv);
state->s[1] = *key;
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[2].words[i] = U32_C(0xFFFFFFFF);
state->s[3] = crypto_morus640_const[0];
state->s[4] = crypto_morus640_const[1];
for (i = 0; i < 16; i++)
crypto_morus640_update(state, &z);
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[1].words[i] ^= key->words[i];
}
static void crypto_morus640_process_ad(struct morus640_state *state,
struct scatterlist *sg_src,
unsigned int assoclen)
{
struct scatter_walk walk;
struct morus640_block m;
union morus640_block_in buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= MORUS640_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = MORUS640_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_morus640_load_a(&m, buf.bytes);
crypto_morus640_update(state, &m);
pos = 0;
left -= fill;
src += fill;
}
crypto_morus640_ad(state, src, left);
src += left & ~(MORUS640_BLOCK_SIZE - 1);
left &= MORUS640_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
crypto_morus640_load_a(&m, buf.bytes);
crypto_morus640_update(state, &m);
}
}
static void crypto_morus640_process_crypt(struct morus640_state *state,
struct aead_request *req,
const struct morus640_ops *ops)
{
struct skcipher_walk walk;
u8 *dst;
const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
ops->crypt_chunk(state, dst, src, walk.nbytes);
skcipher_walk_done(&walk, 0);
}
}
static void crypto_morus640_final(struct morus640_state *state,
struct morus640_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
u32 assocbits_lo = (u32)assocbits;
u32 assocbits_hi = (u32)(assocbits >> 32);
u32 cryptbits_lo = (u32)cryptbits;
u32 cryptbits_hi = (u32)(cryptbits >> 32);
struct morus640_block tmp;
unsigned int i;
tmp.words[0] = cpu_to_le32(assocbits_lo);
tmp.words[1] = cpu_to_le32(assocbits_hi);
tmp.words[2] = cpu_to_le32(cryptbits_lo);
tmp.words[3] = cpu_to_le32(cryptbits_hi);
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[4].words[i] ^= state->s[0].words[i];
for (i = 0; i < 10; i++)
crypto_morus640_update(state, &tmp);
crypto_morus640_core(state, tag_xor);
}
static int crypto_morus640_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct morus640_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != MORUS640_BLOCK_SIZE) {
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
crypto_morus640_load(&ctx->key, key);
return 0;
}
static int crypto_morus640_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
static void crypto_morus640_crypt(struct aead_request *req,
struct morus640_block *tag_xor,
unsigned int cryptlen,
const struct morus640_ops *ops)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
struct morus640_state state;
crypto_morus640_init(&state, &ctx->key, req->iv);
crypto_morus640_process_ad(&state, req->src, req->assoclen);
crypto_morus640_process_crypt(&state, req, ops);
crypto_morus640_final(&state, tag_xor, req->assoclen, cryptlen);
}
static int crypto_morus640_encrypt(struct aead_request *req)
{
static const struct morus640_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_encrypt,
.crypt_chunk = crypto_morus640_encrypt_chunk,
};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct morus640_block tag = {};
union morus640_block_in tag_out;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
crypto_morus640_crypt(req, &tag, cryptlen, &ops);
crypto_morus640_store(tag_out.bytes, &tag);
scatterwalk_map_and_copy(tag_out.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
return 0;
}
static int crypto_morus640_decrypt(struct aead_request *req)
{
static const struct morus640_ops ops = {
.skcipher_walk_init = skcipher_walk_aead_decrypt,
.crypt_chunk = crypto_morus640_decrypt_chunk,
};
static const u8 zeros[MORUS640_BLOCK_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union morus640_block_in tag_in;
struct morus640_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
scatterwalk_map_and_copy(tag_in.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
crypto_morus640_load(&tag, tag_in.bytes);
crypto_morus640_crypt(req, &tag, cryptlen, &ops);
crypto_morus640_store(tag_in.bytes, &tag);
return crypto_memneq(tag_in.bytes, zeros, authsize) ? -EBADMSG : 0;
}
static int crypto_morus640_init_tfm(struct crypto_aead *tfm)
{
return 0;
}
static void crypto_morus640_exit_tfm(struct crypto_aead *tfm)
{
}
static struct aead_alg crypto_morus640_alg = {
.setkey = crypto_morus640_setkey,
.setauthsize = crypto_morus640_setauthsize,
.encrypt = crypto_morus640_encrypt,
.decrypt = crypto_morus640_decrypt,
.init = crypto_morus640_init_tfm,
.exit = crypto_morus640_exit_tfm,
.ivsize = MORUS_NONCE_SIZE,
.maxauthsize = MORUS_MAX_AUTH_SIZE,
.chunksize = MORUS640_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus640_ctx),
.cra_alignmask = 0,
.cra_priority = 100,
.cra_name = "morus640",
.cra_driver_name = "morus640-generic",
.cra_module = THIS_MODULE,
}
};
static int __init crypto_morus640_module_init(void)
{
return crypto_register_aead(&crypto_morus640_alg);
}
static void __exit crypto_morus640_module_exit(void)
{
crypto_unregister_aead(&crypto_morus640_alg);
}
module_init(crypto_morus640_module_init);
module_exit(crypto_morus640_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD algorithm");
MODULE_ALIAS_CRYPTO("morus640");
MODULE_ALIAS_CRYPTO("morus640-generic");

View File

@@ -14,6 +14,7 @@
*
*/
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -72,7 +73,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmpbuf[bsize];
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
memcpy(tmpbuf, src, bsize);
@@ -144,7 +145,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmpbuf[bsize] __aligned(__alignof__(u32));
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
memcpy(tmpbuf, src, bsize);

View File

@@ -215,7 +215,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
ret = -ENOMEM;

View File

@@ -21,9 +21,17 @@
#include <asm/unaligned.h>
#include <crypto/internal/skcipher.h>
#include <crypto/salsa20.h>
#include <linux/module.h>
#define SALSA20_IV_SIZE 8
#define SALSA20_MIN_KEY_SIZE 16
#define SALSA20_MAX_KEY_SIZE 32
#define SALSA20_BLOCK_SIZE 64
struct salsa20_ctx {
u32 initial_state[16];
};
static void salsa20_block(u32 *state, __le32 *stream)
{
u32 x[16];
@@ -93,16 +101,15 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
}
}
void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
static void salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
const u8 *iv)
{
memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
state[6] = get_unaligned_le32(iv + 0);
state[7] = get_unaligned_le32(iv + 4);
}
EXPORT_SYMBOL_GPL(crypto_salsa20_init);
int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize)
{
static const char sigma[16] = "expand 32-byte k";
@@ -143,7 +150,6 @@ int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return 0;
}
EXPORT_SYMBOL_GPL(crypto_salsa20_setkey);
static int salsa20_crypt(struct skcipher_request *req)
{
@@ -155,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, true);
crypto_salsa20_init(state, ctx, walk.iv);
salsa20_init(state, ctx, walk.iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -183,7 +189,7 @@ static struct skcipher_alg alg = {
.max_keysize = SALSA20_MAX_KEY_SIZE,
.ivsize = SALSA20_IV_SIZE,
.chunksize = SALSA20_BLOCK_SIZE,
.setkey = crypto_salsa20_setkey,
.setkey = salsa20_setkey,
.encrypt = salsa20_crypt,
.decrypt = salsa20_crypt,
};

View File

@@ -190,21 +190,23 @@ static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
/* encrypt a block of text */
static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
}
EXPORT_SYMBOL_GPL(crypto_sm4_encrypt);
/* decrypt a block of text */
static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
}
EXPORT_SYMBOL_GPL(crypto_sm4_decrypt);
static struct crypto_alg sm4_alg = {
.cra_name = "sm4",
@@ -219,8 +221,8 @@ static struct crypto_alg sm4_alg = {
.cia_min_keysize = SM4_KEY_SIZE,
.cia_max_keysize = SM4_KEY_SIZE,
.cia_setkey = crypto_sm4_set_key,
.cia_encrypt = sm4_encrypt,
.cia_decrypt = sm4_decrypt
.cia_encrypt = crypto_sm4_encrypt,
.cia_decrypt = crypto_sm4_decrypt
}
}
};

View File

@@ -158,9 +158,9 @@ struct test_mb_aead_data {
};
static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
u32 num_mb)
u32 num_mb, int *rc)
{
int i, rc[num_mb], err = 0;
int i, err = 0;
/* Fire up a bunch of concurrent requests */
for (i = 0; i < num_mb; i++) {
@@ -188,18 +188,26 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
{
unsigned long start, end;
int bcount;
int ret;
int ret = 0;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_mult_aead_op(data, enc, num_mb);
ret = do_mult_aead_op(data, enc, num_mb, rc);
if (ret)
return ret;
goto out;
}
pr_cont("%d operations in %d seconds (%ld bytes)\n",
bcount * num_mb, secs, (long)bcount * blen * num_mb);
return 0;
out:
kfree(rc);
return ret;
}
static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
@@ -208,10 +216,15 @@ static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
unsigned long cycles = 0;
int ret = 0;
int i;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_mult_aead_op(data, enc, num_mb);
ret = do_mult_aead_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
@@ -221,7 +234,7 @@ static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
cycles_t start, end;
start = get_cycles();
ret = do_mult_aead_op(data, enc, num_mb);
ret = do_mult_aead_op(data, enc, num_mb, rc);
end = get_cycles();
if (ret)
@@ -230,11 +243,11 @@ static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
out:
kfree(rc);
return ret;
}
@@ -705,9 +718,10 @@ struct test_mb_ahash_data {
char *xbuf[XBUFSIZE];
};
static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb)
static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
int *rc)
{
int i, rc[num_mb], err = 0;
int i, err = 0;
/* Fire up a bunch of concurrent requests */
for (i = 0; i < num_mb; i++)
@@ -731,18 +745,26 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
{
unsigned long start, end;
int bcount;
int ret;
int ret = 0;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_mult_ahash_op(data, num_mb);
ret = do_mult_ahash_op(data, num_mb, rc);
if (ret)
return ret;
goto out;
}
pr_cont("%d operations in %d seconds (%ld bytes)\n",
bcount * num_mb, secs, (long)bcount * blen * num_mb);
return 0;
out:
kfree(rc);
return ret;
}
static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
@@ -751,10 +773,15 @@ static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
unsigned long cycles = 0;
int ret = 0;
int i;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_mult_ahash_op(data, num_mb);
ret = do_mult_ahash_op(data, num_mb, rc);
if (ret)
goto out;
}
@@ -764,7 +791,7 @@ static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
cycles_t start, end;
start = get_cycles();
ret = do_mult_ahash_op(data, num_mb);
ret = do_mult_ahash_op(data, num_mb, rc);
end = get_cycles();
if (ret)
@@ -773,11 +800,11 @@ static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
out:
kfree(rc);
return ret;
}
@@ -1118,9 +1145,9 @@ struct test_mb_skcipher_data {
};
static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
u32 num_mb)
u32 num_mb, int *rc)
{
int i, rc[num_mb], err = 0;
int i, err = 0;
/* Fire up a bunch of concurrent requests */
for (i = 0; i < num_mb; i++) {
@@ -1148,18 +1175,26 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
{
unsigned long start, end;
int bcount;
int ret;
int ret = 0;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_mult_acipher_op(data, enc, num_mb);
ret = do_mult_acipher_op(data, enc, num_mb, rc);
if (ret)
return ret;
goto out;
}
pr_cont("%d operations in %d seconds (%ld bytes)\n",
bcount * num_mb, secs, (long)bcount * blen * num_mb);
return 0;
out:
kfree(rc);
return ret;
}
static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
@@ -1168,10 +1203,15 @@ static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
unsigned long cycles = 0;
int ret = 0;
int i;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_mult_acipher_op(data, enc, num_mb);
ret = do_mult_acipher_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
@@ -1181,7 +1221,7 @@ static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
cycles_t start, end;
start = get_cycles();
ret = do_mult_acipher_op(data, enc, num_mb);
ret = do_mult_acipher_op(data, enc, num_mb, rc);
end = get_cycles();
if (ret)
@@ -1190,11 +1230,11 @@ static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
out:
kfree(rc);
return ret;
}
@@ -1606,7 +1646,7 @@ static inline int tcrypt_test(const char *alg)
return ret;
}
static int do_test(const char *alg, u32 type, u32 mask, int m)
static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
{
int i;
int ret = 0;
@@ -1621,7 +1661,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
}
for (i = 1; i < 200; i++)
ret += do_test(NULL, 0, 0, i);
ret += do_test(NULL, 0, 0, i, num_mb);
break;
case 1:
@@ -1902,10 +1942,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
ret += tcrypt_test("vmac(aes)");
break;
case 110:
ret += tcrypt_test("hmac(crc32)");
break;
case 111:
ret += tcrypt_test("hmac(sha3-224)");
break;
@@ -2903,7 +2939,7 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
err = do_test(alg, type, mask, mode);
err = do_test(alg, type, mask, mode, num_mb);
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
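The tcrypt changes above remove the on-stack int rc[num_mb] arrays: each timing helper now kcalloc()s the per-request result array once, passes it down to do_mult_*_op(), and frees it on a shared out: label. A reduced sketch of that shape, with placeholder helper names:

#include <linux/errno.h>
#include <linux/slab.h>

/* Stand-in for do_mult_*_op(): record one status per concurrent request. */
static int demo_do_mult_op(unsigned int num_mb, int *rc)
{
	unsigned int i;

	for (i = 0; i < num_mb; i++)
		rc[i] = 0;
	return 0;
}

static int demo_timing_loop(unsigned int num_mb)
{
	int *rc;
	int ret = 0;
	int i;

	rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);	/* was: int rc[num_mb] */
	if (!rc)
		return -ENOMEM;

	for (i = 0; i < 4; i++) {
		ret = demo_do_mult_op(num_mb, rc);
		if (ret)
			goto out;
	}
out:
	kfree(rc);
	return ret;
}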

File diff suppressed because it is too large

File diff suppressed because it is too large

265
crypto/zstd.c Normal file
View File

@@ -0,0 +1,265 @@
/*
* Cryptographic API.
*
* Copyright (c) 2017-present, Facebook, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#include <crypto/internal/scompress.h>
#define ZSTD_DEF_LEVEL 3
struct zstd_ctx {
ZSTD_CCtx *cctx;
ZSTD_DCtx *dctx;
void *cwksp;
void *dwksp;
};
static ZSTD_parameters zstd_params(void)
{
return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
}
static int zstd_comp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const ZSTD_parameters params = zstd_params();
const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
ctx->cwksp = vzalloc(wksp_size);
if (!ctx->cwksp) {
ret = -ENOMEM;
goto out;
}
ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
if (!ctx->cctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->cwksp);
goto out;
}
static int zstd_decomp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
ctx->dwksp = vzalloc(wksp_size);
if (!ctx->dwksp) {
ret = -ENOMEM;
goto out;
}
ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
if (!ctx->dctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->dwksp);
goto out;
}
static void zstd_comp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->cwksp);
ctx->cwksp = NULL;
ctx->cctx = NULL;
}
static void zstd_decomp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->dwksp);
ctx->dwksp = NULL;
ctx->dctx = NULL;
}
static int __zstd_init(void *ctx)
{
int ret;
ret = zstd_comp_init(ctx);
if (ret)
return ret;
ret = zstd_decomp_init(ctx);
if (ret)
zstd_comp_exit(ctx);
return ret;
}
static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
{
int ret;
struct zstd_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ret = __zstd_init(ctx);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
return ctx;
}
static int zstd_init(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_init(ctx);
}
static void __zstd_exit(void *ctx)
{
zstd_comp_exit(ctx);
zstd_decomp_exit(ctx);
}
static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
__zstd_exit(ctx);
kzfree(ctx);
}
static void zstd_exit(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
__zstd_exit(ctx);
}
static int __zstd_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
const ZSTD_parameters params = zstd_params();
out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
if (ZSTD_isError(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int __zstd_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
if (ZSTD_isError(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static struct crypto_alg alg = {
.cra_name = "zstd",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zstd_ctx),
.cra_module = THIS_MODULE,
.cra_init = zstd_init,
.cra_exit = zstd_exit,
.cra_u = { .compress = {
.coa_compress = zstd_compress,
.coa_decompress = zstd_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = zstd_alloc_ctx,
.free_ctx = zstd_free_ctx,
.compress = zstd_scompress,
.decompress = zstd_sdecompress,
.base = {
.cra_name = "zstd",
.cra_driver_name = "zstd-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init zstd_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret)
crypto_unregister_alg(&alg);
return ret;
}
static void __exit zstd_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zstd Compression Algorithm");
MODULE_ALIAS_CRYPTO("zstd");