Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Add helper for simple skcipher modes.
   - Add helper to register multiple templates.
   - Set CRYPTO_TFM_NEED_KEY when setkey fails.
   - Require neither or both of export/import in shash.
   - AEAD decryption test vectors are now generated from encryption
     ones.
   - New option CONFIG_CRYPTO_MANAGER_EXTRA_TESTS that includes random
     fuzzing.

  Algorithms:
   - Conversions to skcipher and helper for many templates.
   - Add more test vectors for nhpoly1305 and adiantum.

  Drivers:
   - Add crypto4xx prng support.
   - Add xcbc/cmac/ecb support in caam.
   - Add AES support for Exynos5433 in s5p.
   - Remove sha384/sha512 from artpec7 as hardware cannot do partial
     hash"

[ There is a merge of the Freescale SoC tree in order to pull in changes
  required by patches to the caam/qi2 driver. ]
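[ A minimal sketch of the new "register multiple templates" helper,
  using hypothetical mymode names; the pattern mirrors the ccm/gcm/ctr
  conversions in the diffs below:

	static struct crypto_template my_tmpls[] = {
		{
			.name	= "mymode",
			.create	= my_mode_create,
			.module	= THIS_MODULE,
		}, {
			.name	= "mymode2",
			.create	= my_mode2_create,
			.module	= THIS_MODULE,
		},
	};

	static int __init my_mode_init(void)
	{
		/* Registers in order, unwinding the already-registered
		 * entries if any registration fails. */
		return crypto_register_templates(my_tmpls,
						 ARRAY_SIZE(my_tmpls));
	}

	static void __exit my_mode_exit(void)
	{
		crypto_unregister_templates(my_tmpls, ARRAY_SIZE(my_tmpls));
	}

	module_init(my_mode_init);
	module_exit(my_mode_exit);
]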

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (174 commits)
  crypto: s5p - add AES support for Exynos5433
  dt-bindings: crypto: document Exynos5433 SlimSSS
  crypto: crypto4xx - add missing of_node_put after of_device_is_available
  crypto: cavium/zip - fix collision with generic cra_driver_name
  crypto: af_alg - use struct_size() in sock_kfree_s()
  crypto: caam - remove redundant likely/unlikely annotation
  crypto: s5p - update iv after AES-CBC op end
  crypto: x86/poly1305 - Clear key material from stack in SSE2 variant
  crypto: caam - generate hash keys in-place
  crypto: caam - fix DMA mapping xcbc key twice
  crypto: caam - fix hash context DMA unmap size
  hwrng: bcm2835 - fix probe as platform device
  crypto: s5p-sss - Use AES_BLOCK_SIZE define instead of number
  crypto: stm32 - drop pointless static qualifier in stm32_hash_remove()
  crypto: chelsio - Fixed Traffic Stall
  crypto: marvell - Remove set but not used variable 'ivsize'
  crypto: ccp - Update driver messages to remove some confusion
  crypto: adiantum - add 1536 and 4096-byte test vectors
  crypto: nhpoly1305 - add a test vector with len % 16 != 0
  crypto: arm/aes-ce - update IV after partial final CTR block
  ...
This commit is contained in:
Linus Torvalds
2019-03-05 09:09:55 -08:00
170 changed files with 8942 additions and 11459 deletions


@@ -168,6 +168,16 @@ config CRYPTO_MANAGER_DISABLE_TESTS
 	  Disable run-time self tests that normally take place at
 	  algorithm registration.
 
+config CRYPTO_MANAGER_EXTRA_TESTS
+	bool "Enable extra run-time crypto self tests"
+	depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS
+	help
+	  Enable extra run-time self tests of registered crypto algorithms,
+	  including randomized fuzz tests.
+
+	  This is intended for developer use only, as these tests take much
+	  longer to run than the normal self tests.
+
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions"
 	help
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions"
help
@@ -642,7 +652,7 @@ config CRYPTO_CRC32_PCLMUL
 	  From Intel Westmere and AMD Bulldozer processor with SSE4.2
 	  and PCLMULQDQ supported, the processor will support
 	  CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ
-	  instruction. This option will create 'crc32-plcmul' module,
+	  instruction. This option will create 'crc32-pclmul' module,
 	  which will enable any routine to use the CRC-32-IEEE 802.3 checksum
 	  and gain better performance as compared with the table implementation.
@@ -671,7 +681,7 @@ config CRYPTO_CRCT10DIF_PCLMUL
 	  For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
 	  CRC T10 DIF PCLMULQDQ computation can be hardware
 	  accelerated PCLMULQDQ instruction. This option will create
-	  'crct10dif-plcmul' module, which is faster when computing the
+	  'crct10dif-pclmul' module, which is faster when computing the
 	  crct10dif checksum as compared with the generic table implementation.
 
 config CRYPTO_CRCT10DIF_VPMSUM
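[ For reference, a development .config fragment that turns on the new
  fuzz tests, assuming the dependencies from the hunk above:

	CONFIG_DEBUG_KERNEL=y
	# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
	CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y
]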


@@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
 	else
 		err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
 
-	if (err)
+	if (unlikely(err)) {
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
 		return err;
+	}
 
 	crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 	return 0;
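[ Caller-visible effect of the hunk above, as a hedged sketch: a failed
  setkey now marks the tfm as keyless instead of leaving its old state
  in place.

	err = crypto_aead_setkey(tfm, bad_key, bad_keylen);
	/* err != 0 and CRYPTO_TFM_NEED_KEY is now set: encrypt/decrypt
	 * requests on this tfm fail with -ENOKEY. */

	err = crypto_aead_setkey(tfm, good_key, good_keylen);
	/* err == 0, NEED_KEY cleared, requests may be issued again. */
]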


@@ -1,14 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * AEGIS common definitions
  *
  * Copyright (c) 2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (c) 2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
 
 #ifndef _CRYPTO_AEGIS_H


@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * The AEGIS-128 Authenticated-Encryption Algorithm
  *
  * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
  */
 
 #include <crypto/algapi.h>
@@ -290,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
 					  const struct aegis128_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
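[ Worked example of the fixed walk loop, with hypothetical values:
  walk.stride = 16 and a 60-byte chunk arriving before the end of a
  100-byte request.

	nbytes = walk.nbytes;                /* 60, and 60 < walk.total */
	nbytes = round_down(nbytes, 16);     /* 48: whole blocks only   */
	ops->crypt_chunk(state, ..., 48);    /* process 48 bytes        */
	skcipher_walk_done(&walk, 60 - 48);  /* 12 leftover bytes go back
						to the walk for the next
						iteration */

  Only the final chunk of a request may now be a partial block, which is
  what crypt_chunk expects. The same fix is applied to aegis128l and
  aegis256 below. ]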


@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * The AEGIS-128L Authenticated-Encryption Algorithm
  *
  * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
 */
 
 #include <crypto/algapi.h>
@@ -353,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
 					   const struct aegis128l_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }


@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * The AEGIS-256 Authenticated-Encryption Algorithm
  *
  * Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
  * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
 */
 
 #include <crypto/algapi.h>
@@ -303,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
 					  const struct aegis256_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }


@@ -304,8 +304,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 	if (err)
 		goto unlock;
 
-	sk2->sk_family = PF_ALG;
-
 	if (nokey || !ask->refcnt++)
 		sock_hold(sk);
 	ask->nokey_refcnt += nokey;
@@ -382,7 +380,6 @@ static int alg_create(struct net *net, struct socket *sock, int protocol,
 	sock->ops = &alg_proto_ops;
 	sock_init_data(sock, sk);
 
-	sk->sk_family = PF_ALG;
 	sk->sk_destruct = alg_sock_destruct;
 
 	return 0;
@@ -427,12 +424,12 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
-void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
+			   struct af_alg_sgl *sgl_new)
 {
 	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
 	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
 }
-EXPORT_SYMBOL_GPL(af_alg_link_sg);
 
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
@@ -443,7 +440,7 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
-int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 {
 	struct cmsghdr *cmsg;
@@ -482,7 +479,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 	return 0;
 }
-EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
 
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
@@ -490,7 +486,7 @@ EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
 * @sk socket of connection to user space
 * @return: 0 upon success, < 0 upon error
 */
-int af_alg_alloc_tsgl(struct sock *sk)
+static int af_alg_alloc_tsgl(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct af_alg_ctx *ctx = ask->private;
@@ -519,7 +515,6 @@ int af_alg_alloc_tsgl(struct sock *sk)
 	return 0;
 }
-EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
 
 /**
  * aead_count_tsgl - Count number of TX SG entries
@@ -534,17 +529,17 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
 */
 unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 {
-	struct alg_sock *ask = alg_sk(sk);
-	struct af_alg_ctx *ctx = ask->private;
-	struct af_alg_tsgl *sgl, *tmp;
+	const struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_ctx *ctx = ask->private;
+	const struct af_alg_tsgl *sgl;
 	unsigned int i;
 	unsigned int sgl_count = 0;
 
 	if (!bytes)
 		return 0;
 
-	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
-		struct scatterlist *sg = sgl->sg;
+	list_for_each_entry(sgl, &ctx->tsgl_list, list) {
+		const struct scatterlist *sg = sgl->sg;
 
 		for (i = 0; i < sgl->cur; i++) {
 			size_t bytes_count;
@@ -642,8 +637,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
 		}
 
 		list_del(&sgl->list);
-		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
-						     (MAX_SGL_ENTS + 1));
+		sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
 	}
 
 	if (!ctx->used)
@@ -656,7 +650,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
 *
 * @areq Request holding the TX and RX SGL
 */
-void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
+static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 {
 	struct sock *sk = areq->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -685,7 +679,6 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
 	}
 }
-EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
 
 /**
  * af_alg_wait_for_wmem - wait for availability of writable memory
@@ -694,7 +687,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when writable memory is available, < 0 upon error
 */
-int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
+static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	int err = -ERESTARTSYS;
@@ -719,7 +712,6 @@ int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 	return err;
 }
-EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
 
 /**
  * af_alg_wmem_wakeup - wakeup caller when writable memory is available
@@ -788,8 +780,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
 *
 * @sk socket of connection to user space
 */
-void af_alg_data_wakeup(struct sock *sk)
+static void af_alg_data_wakeup(struct sock *sk)
 {
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
@@ -807,7 +798,6 @@ void af_alg_data_wakeup(struct sock *sk)
 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
 
 /**
  * af_alg_sendmsg - implementation of sendmsg system call handler


@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	unsigned int alignmask = walk->alignmask;
-	unsigned int nbytes = walk->entrylen;
 
 	walk->data -= walk->offset;
 
-	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(nbytes,
-			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
-		walk->entrylen -= nbytes;
+	if (walk->entrylen && (walk->offset & alignmask) && !err) {
+		unsigned int nbytes;
+
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		nbytes = min(walk->entrylen,
+			     (unsigned int)(PAGE_SIZE - walk->offset));
 
 		if (nbytes) {
+			walk->entrylen -= nbytes;
 			walk->data += walk->offset;
 			return nbytes;
 		}
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	if (err)
 		return err;
 
-	if (nbytes) {
+	if (walk->entrylen) {
 		walk->offset = 0;
 		walk->pg++;
 		return hash_walk_next(walk);
@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
 	return ret;
 }
 
+static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	return -ENOSYS;
+}
+
+static void ahash_set_needkey(struct crypto_ahash *tfm)
+{
+	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+
+	if (tfm->setkey != ahash_nosetkey &&
+	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+}
+
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	else
 		err = tfm->setkey(tfm, key, keylen);
 
-	if (err)
+	if (unlikely(err)) {
+		ahash_set_needkey(tfm);
 		return err;
+	}
 
 	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
-			  unsigned int keylen)
-{
-	return -ENOSYS;
-}
-
 static inline unsigned int ahash_align_buffer_size(unsigned len,
 						   unsigned long mask)
 {
@@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 	if (alg->setkey) {
 		hash->setkey = alg->setkey;
 		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
-			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
+		ahash_set_needkey(hash);
 	}
 
 	return 0;


@@ -494,6 +494,24 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_register_template);
 
+int crypto_register_templates(struct crypto_template *tmpls, int count)
+{
+	int i, err;
+
+	for (i = 0; i < count; i++) {
+		err = crypto_register_template(&tmpls[i]);
+		if (err)
+			goto out;
+	}
+	return 0;
+
+out:
+	for (--i; i >= 0; --i)
+		crypto_unregister_template(&tmpls[i]);
+	return err;
+}
+EXPORT_SYMBOL_GPL(crypto_register_templates);
+
 void crypto_unregister_template(struct crypto_template *tmpl)
 {
 	struct crypto_instance *inst;
@@ -523,6 +541,15 @@ void crypto_unregister_template(struct crypto_template *tmpl)
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_template);
 
+void crypto_unregister_templates(struct crypto_template *tmpls, int count)
+{
+	int i;
+
+	for (i = count - 1; i >= 0; --i)
+		crypto_unregister_template(&tmpls[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_templates);
+
 static struct crypto_template *__crypto_lookup_template(const char *name)
 {
 	struct crypto_template *q, *tmpl = NULL;
@@ -608,6 +635,9 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
 {
 	int err = -EAGAIN;
 
+	if (WARN_ON_ONCE(inst == NULL))
+		return -EINVAL;
+
 	spawn->inst = inst;
 	spawn->mask = mask;
@@ -845,8 +875,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name,
 }
 EXPORT_SYMBOL_GPL(crypto_inst_setname);
 
-void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
-			     unsigned int head)
+void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
+			    unsigned int head)
 {
 	struct crypto_instance *inst;
 	char *p;
@@ -869,35 +899,6 @@ err_free_inst:
 	kfree(p);
 	return ERR_PTR(err);
 }
-EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
-
-struct crypto_instance *crypto_alloc_instance(const char *name,
-					      struct crypto_alg *alg)
-{
-	struct crypto_instance *inst;
-	struct crypto_spawn *spawn;
-	int err;
-
-	inst = crypto_alloc_instance2(name, alg, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	spawn = crypto_instance_ctx(inst);
-	err = crypto_init_spawn(spawn, alg, inst,
-				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-
-	if (err)
-		goto err_free_inst;
-
-	return inst;
-
-err_free_inst:
-	kfree(inst);
-	inst = ERR_PTR(err);
-
-out:
-	return inst;
-}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance);
 
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)


@@ -12,14 +12,11 @@
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#define ARC4_MIN_KEY_SIZE 1
#define ARC4_MAX_KEY_SIZE 256
#define ARC4_BLOCK_SIZE 1
#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
struct arc4_ctx {
u32 S[256];
@@ -50,6 +47,12 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
return arc4_set_key(&tfm->base, in_key, key_len);
}
static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in,
unsigned int len)
{
@@ -92,30 +95,25 @@ static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in)
arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1);
}
static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
static int ecb_arc4_crypt(struct skcipher_request *req)
{
struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;
arc4_crypt(ctx, wdst, wsrc, walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static struct crypto_alg arc4_algs[2] = { {
static struct crypto_alg arc4_cipher = {
.cra_name = "arc4",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARC4_BLOCK_SIZE,
@@ -130,34 +128,39 @@ static struct crypto_alg arc4_algs[2] = { {
.cia_decrypt = arc4_crypt_one,
},
},
}, {
.cra_name = "ecb(arc4)",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = ARC4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct arc4_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = ARC4_MIN_KEY_SIZE,
.max_keysize = ARC4_MAX_KEY_SIZE,
.setkey = arc4_set_key,
.encrypt = ecb_arc4_crypt,
.decrypt = ecb_arc4_crypt,
},
},
} };
};
static struct skcipher_alg arc4_skcipher = {
.base.cra_name = "ecb(arc4)",
.base.cra_priority = 100,
.base.cra_blocksize = ARC4_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct arc4_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARC4_MIN_KEY_SIZE,
.max_keysize = ARC4_MAX_KEY_SIZE,
.setkey = arc4_set_key_skcipher,
.encrypt = ecb_arc4_crypt,
.decrypt = ecb_arc4_crypt,
};
static int __init arc4_init(void)
{
return crypto_register_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
int err;
err = crypto_register_alg(&arc4_cipher);
if (err)
return err;
err = crypto_register_skcipher(&arc4_skcipher);
if (err)
crypto_unregister_alg(&arc4_cipher);
return err;
}
static void __exit arc4_exit(void)
{
crypto_unregister_algs(arc4_algs, ARRAY_SIZE(arc4_algs));
crypto_unregister_alg(&arc4_cipher);
crypto_unregister_skcipher(&arc4_skcipher);
}
module_init(arc4_init);


@@ -18,34 +18,11 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
struct crypto_cbc_ctx {
struct crypto_cipher *child;
};
static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_cipher_encrypt_one(ctx->child, dst, src);
crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
static int crypto_cbc_encrypt(struct skcipher_request *req)
@@ -56,9 +33,7 @@ static int crypto_cbc_encrypt(struct skcipher_request *req)
static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_cipher_decrypt_one(ctx->child, dst, src);
crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
static int crypto_cbc_decrypt(struct skcipher_request *req)
@@ -78,113 +53,33 @@ static int crypto_cbc_decrypt(struct skcipher_request *req)
return err;
}
static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static void crypto_cbc_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct crypto_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
goto err_free_inst;
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
err = PTR_ERR(alg);
if (IS_ERR(alg))
goto err_free_inst;
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
if (err)
goto err_drop_spawn;
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
err = -EINVAL;
if (!is_power_of_2(alg->cra_blocksize))
goto err_drop_spawn;
goto out_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.ivsize = alg->cra_blocksize;
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
inst->alg.init = crypto_cbc_init_tfm;
inst->alg.exit = crypto_cbc_exit_tfm;
inst->alg.setkey = crypto_cbc_setkey;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
inst->free = crypto_cbc_free;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
crypto_mod_put(alg);
goto out_free_inst;
goto out_put_alg;
out:
out_free_inst:
inst->free(inst);
out_put_alg:
crypto_mod_put(alg);
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
}
static struct crypto_template crypto_cbc_tmpl = {
@@ -207,5 +102,5 @@ module_init(crypto_cbc_module_init);
 module_exit(crypto_cbc_module_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_DESCRIPTION("CBC block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("cbc");


@@ -589,12 +589,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 			 mac_name);
 }
 
-static struct crypto_template crypto_ccm_tmpl = {
-	.name = "ccm",
-	.create = crypto_ccm_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_ccm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
@@ -618,12 +612,6 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
 			 cipher_name);
 }
 
-static struct crypto_template crypto_ccm_base_tmpl = {
-	.name = "ccm_base",
-	.create = crypto_ccm_base_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
 				 unsigned int keylen)
 {
@@ -854,12 +842,6 @@ out_free_inst:
 	goto out;
 }
 
-static struct crypto_template crypto_rfc4309_tmpl = {
-	.name = "rfc4309",
-	.create = crypto_rfc4309_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
 				       const u8 *inkey, unsigned int keylen)
 {
@@ -999,51 +981,37 @@ out_put_alg:
 	return err;
 }
 
-static struct crypto_template crypto_cbcmac_tmpl = {
-	.name = "cbcmac",
-	.create = cbcmac_create,
-	.free = shash_free_instance,
-	.module = THIS_MODULE,
+static struct crypto_template crypto_ccm_tmpls[] = {
+	{
+		.name = "cbcmac",
+		.create = cbcmac_create,
+		.free = shash_free_instance,
+		.module = THIS_MODULE,
+	}, {
+		.name = "ccm_base",
+		.create = crypto_ccm_base_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "ccm",
+		.create = crypto_ccm_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4309",
+		.create = crypto_rfc4309_create,
+		.module = THIS_MODULE,
+	},
 };
 
 static int __init crypto_ccm_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&crypto_cbcmac_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&crypto_ccm_base_tmpl);
-	if (err)
-		goto out_undo_cbcmac;
-
-	err = crypto_register_template(&crypto_ccm_tmpl);
-	if (err)
-		goto out_undo_base;
-
-	err = crypto_register_template(&crypto_rfc4309_tmpl);
-	if (err)
-		goto out_undo_ccm;
-
-out:
-	return err;
-
-out_undo_ccm:
-	crypto_unregister_template(&crypto_ccm_tmpl);
-out_undo_base:
-	crypto_unregister_template(&crypto_ccm_base_tmpl);
-out_undo_cbcmac:
-	crypto_register_template(&crypto_cbcmac_tmpl);
-	goto out;
+	return crypto_register_templates(crypto_ccm_tmpls,
+					 ARRAY_SIZE(crypto_ccm_tmpls));
 }
 
 static void __exit crypto_ccm_module_exit(void)
 {
-	crypto_unregister_template(&crypto_rfc4309_tmpl);
-	crypto_unregister_template(&crypto_ccm_tmpl);
-	crypto_unregister_template(&crypto_ccm_base_tmpl);
-	crypto_unregister_template(&crypto_cbcmac_tmpl);
+	crypto_unregister_templates(crypto_ccm_tmpls,
+				    ARRAY_SIZE(crypto_ccm_tmpls));
 }
module_init(crypto_ccm_module_init);


@@ -25,28 +25,17 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
struct crypto_cfb_ctx {
struct crypto_cipher *child;
};
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
return crypto_cipher_blocksize(child);
return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_cipher_encrypt_one(ctx->child, dst, src);
crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
/* final encrypt and decrypt is the same */
@@ -77,12 +66,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
 	do {
 		crypto_cfb_encrypt_one(tfm, iv, dst);
 		crypto_xor(dst, src, bsize);
-		memcpy(iv, dst, bsize);
+		iv = dst;
 
 		src += bsize;
 		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
+	memcpy(walk->iv, iv, bsize);
+
 	return nbytes;
 }
@@ -162,7 +153,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
 	const unsigned int bsize = crypto_cfb_bsize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
+	u8 * const iv = walk->iv;
 	u8 tmp[MAX_CIPHER_BLOCKSIZE];
 
 	do {
@@ -172,8 +163,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
 		src += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
-
 	return nbytes;
 }
@@ -186,22 +175,6 @@ static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
return crypto_cfb_decrypt_segment(walk, tfm);
}
static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -224,110 +197,34 @@ static int crypto_cfb_decrypt(struct skcipher_request *req)
return err;
}
static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static void crypto_cfb_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct crypto_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
if (err)
return err;
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
goto err_free_inst;
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
err = PTR_ERR(alg);
if (IS_ERR(alg))
goto err_free_inst;
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
if (err)
goto err_drop_spawn;
inst->alg.base.cra_priority = alg->cra_priority;
/* we're a stream cipher independend of the crypto cra_blocksize */
/* CFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.ivsize = alg->cra_blocksize;
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
inst->alg.init = crypto_cfb_init_tfm;
inst->alg.exit = crypto_cfb_exit_tfm;
inst->alg.setkey = crypto_cfb_setkey;
inst->alg.encrypt = crypto_cfb_encrypt;
inst->alg.decrypt = crypto_cfb_decrypt;
inst->free = crypto_cfb_free;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
crypto_mod_put(alg);
inst->free(inst);
out:
crypto_mod_put(alg);
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
}
static struct crypto_template crypto_cfb_tmpl = {
@@ -350,5 +247,5 @@ module_init(crypto_cfb_module_init);
 module_exit(crypto_cfb_module_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_DESCRIPTION("CFB block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("cfb");


@@ -701,37 +701,28 @@ static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
 	return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
 }
 
-static struct crypto_template rfc7539_tmpl = {
-	.name = "rfc7539",
-	.create = rfc7539_create,
-	.module = THIS_MODULE,
-};
-
-static struct crypto_template rfc7539esp_tmpl = {
-	.name = "rfc7539esp",
-	.create = rfc7539esp_create,
-	.module = THIS_MODULE,
+static struct crypto_template rfc7539_tmpls[] = {
+	{
+		.name = "rfc7539",
+		.create = rfc7539_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc7539esp",
+		.create = rfc7539esp_create,
+		.module = THIS_MODULE,
+	},
 };
 
 static int __init chacha20poly1305_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&rfc7539_tmpl);
-	if (err)
-		return err;
-
-	err = crypto_register_template(&rfc7539esp_tmpl);
-	if (err)
-		crypto_unregister_template(&rfc7539_tmpl);
-
-	return err;
+	return crypto_register_templates(rfc7539_tmpls,
+					 ARRAY_SIZE(rfc7539_tmpls));
 }
 
 static void __exit chacha20poly1305_module_exit(void)
 {
-	crypto_unregister_template(&rfc7539esp_tmpl);
-	crypto_unregister_template(&rfc7539_tmpl);
+	crypto_unregister_templates(rfc7539_tmpls,
+				    ARRAY_SIZE(rfc7539_tmpls));
 }
module_init(chacha20poly1305_module_init);


@@ -65,6 +65,10 @@ static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
@@ -74,21 +78,18 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
memcpy(dst, src, NULL_BLOCK_SIZE);
}
static int skcipher_null_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
static int null_skcipher_crypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;
struct skcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
if (walk.src.virt.addr != walk.dst.virt.addr)
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
err = skcipher_walk_done(&walk, 0);
}
return err;
@@ -109,7 +110,22 @@ static struct shash_alg digest_null = {
}
};
static struct crypto_alg null_algs[3] = { {
static struct skcipher_alg skcipher_null = {
.base.cra_name = "ecb(cipher_null)",
.base.cra_driver_name = "ecb-cipher_null",
.base.cra_priority = 100,
.base.cra_blocksize = NULL_BLOCK_SIZE,
.base.cra_ctxsize = 0,
.base.cra_module = THIS_MODULE,
.min_keysize = NULL_KEY_SIZE,
.max_keysize = NULL_KEY_SIZE,
.ivsize = NULL_IV_SIZE,
.setkey = null_skcipher_setkey,
.encrypt = null_skcipher_crypt,
.decrypt = null_skcipher_crypt,
};
static struct crypto_alg null_algs[] = { {
.cra_name = "cipher_null",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
@@ -121,22 +137,6 @@ static struct crypto_alg null_algs[3] = { {
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
}, {
.cra_name = "ecb(cipher_null)",
.cra_driver_name = "ecb-cipher_null",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_type = &crypto_blkcipher_type,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .blkcipher = {
.min_keysize = NULL_KEY_SIZE,
.max_keysize = NULL_KEY_SIZE,
.ivsize = NULL_IV_SIZE,
.setkey = null_setkey,
.encrypt = skcipher_null_crypt,
.decrypt = skcipher_null_crypt } }
}, {
.cra_name = "compress_null",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
@@ -199,8 +199,14 @@ static int __init crypto_null_mod_init(void)
if (ret < 0)
goto out_unregister_algs;
ret = crypto_register_skcipher(&skcipher_null);
if (ret < 0)
goto out_unregister_shash;
return 0;
out_unregister_shash:
crypto_unregister_shash(&digest_null);
out_unregister_algs:
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
out:
@@ -209,8 +215,9 @@ out:
static void __exit crypto_null_mod_fini(void)
{
crypto_unregister_shash(&digest_null);
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
crypto_unregister_shash(&digest_null);
crypto_unregister_skcipher(&skcipher_null);
}
module_init(crypto_null_mod_init);


@@ -20,10 +20,6 @@
 #define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))
 
 static DEFINE_MUTEX(crypto_cfg_mutex);
 
-extern struct sock *crypto_nlsk;
-
 struct crypto_dump_info {
 	struct sk_buff *in_skb;
 	struct sk_buff *out_skb;


@@ -17,14 +17,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_ctr_ctx {
struct crypto_cipher *child;
};
struct crypto_rfc3686_ctx {
struct crypto_skcipher *child;
u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -35,24 +29,7 @@ struct crypto_rfc3686_req_ctx {
struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
unsigned int bsize = crypto_cipher_blocksize(tfm);
@@ -70,7 +47,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
crypto_inc(ctrblk, bsize);
}
static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -96,7 +73,7 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
return nbytes;
}
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
@@ -123,138 +100,77 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
return nbytes;
}
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int crypto_ctr_crypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
unsigned int bsize = crypto_cipher_blocksize(child);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, bsize);
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_ctr_crypt_inplace(&walk, child);
nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
else
nbytes = crypto_ctr_crypt_segment(&walk, child);
nbytes = crypto_ctr_crypt_segment(&walk, cipher);
err = blkcipher_walk_done(desc, &walk, nbytes);
err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
crypto_ctr_crypt_final(&walk, child);
err = blkcipher_walk_done(desc, &walk, 0);
crypto_ctr_crypt_final(&walk, cipher);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_attr_type *algt;
struct skcipher_instance *inst;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
/* Block size must be >= 4 bytes. */
err = -EINVAL;
if (alg->cra_blocksize < 4)
goto out_put_alg;
goto out_free_inst;
/* If this is false we'd fail the alignment of crypto_inc. */
if (alg->cra_blocksize % 4)
goto out_put_alg;
goto out_free_inst;
inst = crypto_alloc_instance("ctr", alg);
if (IS_ERR(inst))
goto out;
/* CTR mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = 1;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.encrypt = crypto_ctr_crypt;
inst->alg.decrypt = crypto_ctr_crypt;
inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
inst->alg.cra_init = crypto_ctr_init_tfm;
inst->alg.cra_exit = crypto_ctr_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
out:
crypto_mod_put(alg);
return inst;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto out_free_inst;
goto out_put_alg;
out_free_inst:
inst->free(inst);
out_put_alg:
inst = ERR_PTR(err);
goto out;
crypto_mod_put(alg);
return err;
}
static void crypto_ctr_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_ctr_tmpl = {
.name = "ctr",
.alloc = crypto_ctr_alloc,
.free = crypto_ctr_free,
.module = THIS_MODULE,
};
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
@@ -444,42 +360,34 @@ err_free_inst:
goto out;
}
static struct crypto_template crypto_rfc3686_tmpl = {
.name = "rfc3686",
.create = crypto_rfc3686_create,
.module = THIS_MODULE,
static struct crypto_template crypto_ctr_tmpls[] = {
{
.name = "ctr",
.create = crypto_ctr_create,
.module = THIS_MODULE,
}, {
.name = "rfc3686",
.create = crypto_rfc3686_create,
.module = THIS_MODULE,
},
};
static int __init crypto_ctr_module_init(void)
{
int err;
err = crypto_register_template(&crypto_ctr_tmpl);
if (err)
goto out;
err = crypto_register_template(&crypto_rfc3686_tmpl);
if (err)
goto out_drop_ctr;
out:
return err;
out_drop_ctr:
crypto_unregister_template(&crypto_ctr_tmpl);
goto out;
return crypto_register_templates(crypto_ctr_tmpls,
ARRAY_SIZE(crypto_ctr_tmpls));
}
static void __exit crypto_ctr_module_exit(void)
{
crypto_unregister_template(&crypto_rfc3686_tmpl);
crypto_unregister_template(&crypto_ctr_tmpl);
crypto_unregister_templates(crypto_ctr_tmpls,
ARRAY_SIZE(crypto_ctr_tmpls));
}
 module_init(crypto_ctr_module_init);
 module_exit(crypto_ctr_module_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CTR Counter block mode");
+MODULE_DESCRIPTION("CTR block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("rfc3686");
 MODULE_ALIAS_CRYPTO("ctr");


@@ -789,7 +789,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
 	/* Expand to tmp */
 	ret = des_ekey(tmp, key);
 
-	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
@@ -866,7 +866,7 @@ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
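[ The renamed flag keeps its meaning: callers that want known-weak DES
  keys rejected set it before setkey. A hedged usage sketch, assuming an
  "ecb(des)" skcipher tfm:

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	err = crypto_skcipher_setkey(tfm, key, DES_KEY_SIZE);
	/* err == -EINVAL and CRYPTO_TFM_RES_WEAK_KEY is set in the tfm
	 * flags if 'key' is one of the known weak DES keys. */
]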


@@ -11,162 +11,83 @@
*/
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_ecb_ctx {
struct crypto_cipher *child;
};
static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_ecb_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
static int crypto_ecb_crypt(struct skcipher_request *req,
struct crypto_cipher *cipher,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
int bsize = crypto_cipher_blocksize(tfm);
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = blkcipher_walk_virt(desc, walk);
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
while ((nbytes = walk.nbytes) != 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
do {
fn(crypto_cipher_tfm(tfm), wdst, wsrc);
fn(crypto_cipher_tfm(cipher), dst, src);
wsrc += bsize;
wdst += bsize;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
err = blkcipher_walk_done(desc, walk, nbytes);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int crypto_ecb_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int crypto_ecb_encrypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
blkcipher_walk_init(&walk, dst, src, nbytes);
return crypto_ecb_crypt(desc, &walk, child,
crypto_cipher_alg(child)->cia_encrypt);
return crypto_ecb_crypt(req, cipher,
crypto_cipher_alg(cipher)->cia_encrypt);
}
static int crypto_ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int crypto_ecb_decrypt(struct skcipher_request *req)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
blkcipher_walk_init(&walk, dst, src, nbytes);
return crypto_ecb_crypt(desc, &walk, child,
crypto_cipher_alg(child)->cia_decrypt);
return crypto_ecb_crypt(req, cipher,
crypto_cipher_alg(cipher)->cia_decrypt);
}
static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = crypto_alloc_instance("ecb", alg);
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
goto out_put_alg;
return PTR_ERR(inst);
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.encrypt = crypto_ecb_encrypt;
inst->alg.decrypt = crypto_ecb_decrypt;
inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx);
inst->alg.cra_init = crypto_ecb_init_tfm;
inst->alg.cra_exit = crypto_ecb_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt;
out_put_alg:
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
crypto_mod_put(alg);
return inst;
}
static void crypto_ecb_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
return err;
}
static struct crypto_template crypto_ecb_tmpl = {
.name = "ecb",
.alloc = crypto_ecb_alloc,
.free = crypto_ecb_free,
.create = crypto_ecb_create,
.module = THIS_MODULE,
};
@@ -184,5 +105,5 @@ module_init(crypto_ecb_module_init);
 module_exit(crypto_ecb_module_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_DESCRIPTION("ECB block cipher mode of operation");
 MODULE_ALIAS_CRYPTO("ecb");


@@ -247,7 +247,7 @@ static int gcm_hash_len(struct aead_request *req, u32 flags)
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct ahash_request *ahreq = &pctx->u.ahreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
-	u128 lengths;
+	be128 lengths;
 
 	lengths.a = cpu_to_be64(req->assoclen * 8);
 	lengths.b = cpu_to_be64(gctx->cryptlen * 8);
@@ -727,12 +727,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 				ctr_name, "ghash");
 }
 
-static struct crypto_template crypto_gcm_tmpl = {
-	.name = "gcm",
-	.create = crypto_gcm_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_gcm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
@@ -756,12 +750,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 				ctr_name, ghash_name);
 }
 
-static struct crypto_template crypto_gcm_base_tmpl = {
-	.name = "gcm_base",
-	.create = crypto_gcm_base_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
 				 unsigned int keylen)
 {
@@ -989,12 +977,6 @@ out_free_inst:
 	goto out;
 }
 
-static struct crypto_template crypto_rfc4106_tmpl = {
-	.name = "rfc4106",
-	.create = crypto_rfc4106_create,
-	.module = THIS_MODULE,
-};
-
 static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
 				 unsigned int keylen)
 {
@@ -1231,10 +1213,24 @@ out_free_inst:
 	goto out;
 }
 
-static struct crypto_template crypto_rfc4543_tmpl = {
-	.name = "rfc4543",
-	.create = crypto_rfc4543_create,
-	.module = THIS_MODULE,
+static struct crypto_template crypto_gcm_tmpls[] = {
+	{
+		.name = "gcm_base",
+		.create = crypto_gcm_base_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "gcm",
+		.create = crypto_gcm_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4106",
+		.create = crypto_rfc4106_create,
+		.module = THIS_MODULE,
+	}, {
+		.name = "rfc4543",
+		.create = crypto_rfc4543_create,
+		.module = THIS_MODULE,
+	},
 };
 
 static int __init crypto_gcm_module_init(void)
@@ -1247,42 +1243,19 @@ static int __init crypto_gcm_module_init(void)
 	sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));
 
-	err = crypto_register_template(&crypto_gcm_base_tmpl);
+	err = crypto_register_templates(crypto_gcm_tmpls,
+					ARRAY_SIZE(crypto_gcm_tmpls));
 	if (err)
-		goto out;
+		kfree(gcm_zeroes);
 
-	err = crypto_register_template(&crypto_gcm_tmpl);
-	if (err)
-		goto out_undo_base;
-
-	err = crypto_register_template(&crypto_rfc4106_tmpl);
-	if (err)
-		goto out_undo_gcm;
-
-	err = crypto_register_template(&crypto_rfc4543_tmpl);
-	if (err)
-		goto out_undo_rfc4106;
-
-	return 0;
-
-out_undo_rfc4106:
-	crypto_unregister_template(&crypto_rfc4106_tmpl);
-out_undo_gcm:
-	crypto_unregister_template(&crypto_gcm_tmpl);
-out_undo_base:
-	crypto_unregister_template(&crypto_gcm_base_tmpl);
-out:
-	kfree(gcm_zeroes);
 	return err;
 }
 
 static void __exit crypto_gcm_module_exit(void)
 {
 	kfree(gcm_zeroes);
-	crypto_unregister_template(&crypto_rfc4543_tmpl);
-	crypto_unregister_template(&crypto_rfc4106_tmpl);
-	crypto_unregister_template(&crypto_gcm_tmpl);
-	crypto_unregister_template(&crypto_gcm_base_tmpl);
+	crypto_unregister_templates(crypto_gcm_tmpls,
+				    ARRAY_SIZE(crypto_gcm_tmpls));
 }
module_init(crypto_gcm_module_init);


@@ -56,7 +56,7 @@
  *	u8 *iv = data;
  *	u8 *pt = data + crypto_skcipher_ivsize(tfm);
  *	<ensure that pt contains the plaintext of size ptlen>
- *	sg_init_one(&sg, ptdata, ptlen);
+ *	sg_init_one(&sg, pt, ptlen);
  *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
  *
  * ==> After encryption, data now contains full KW result as per SP800-38F.
@@ -70,8 +70,8 @@
  *	u8 *iv = data;
  *	u8 *ct = data + crypto_skcipher_ivsize(tfm);
  *	unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
- *	sg_init_one(&sg, ctdata, ctlen);
- *	skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
+ *	sg_init_one(&sg, ct, ctlen);
+ *	skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
 *
 * ==> After decryption (which hopefully does not return EBADMSG), the ct
 * pointer now points to the plaintext of size ctlen.
@@ -87,10 +87,6 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/skcipher.h>
 
-struct crypto_kw_ctx {
-	struct crypto_cipher *child;
-};
-
 struct crypto_kw_block {
 #define SEMIBSIZE 8
 	__be64 A;
@@ -124,16 +120,13 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
}
}
static int crypto_kw_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int crypto_kw_decrypt(struct skcipher_request *req)
{
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
u64 t = 6 * ((nbytes) >> 3);
struct scatterlist *src, *dst;
u64 t = 6 * ((req->cryptlen) >> 3);
unsigned int i;
int ret = 0;
@@ -141,27 +134,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
* Require at least 2 semiblocks (note, the 3rd semiblock that is
* required by SP800-38F is the IV.
*/
if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/* Place the IV into block A */
memcpy(&block.A, desc->info, SEMIBSIZE);
memcpy(&block.A, req->iv, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, lsrc points to src and ldst to dst. For any
* subsequent round, the code operates on dst only.
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
lsrc = src;
ldst = dst;
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int tmp_nbytes = nbytes;
unsigned int nbytes = req->cryptlen;
while (tmp_nbytes) {
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
while (nbytes) {
/* move pointer by nbytes in the SGL */
crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
@@ -170,21 +163,21 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
crypto_cipher_decrypt_one(child, (u8*)&block,
(u8*)&block);
crypto_cipher_decrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
/* move pointer by nbytes in the SGL */
crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
lsrc = dst;
ldst = dst;
src = req->dst;
dst = req->dst;
}
/* Perform authentication check */
@@ -196,15 +189,12 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
return ret;
}
static int crypto_kw_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int crypto_kw_encrypt(struct skcipher_request *req)
{
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
struct scatterlist *src, *dst;
u64 t = 1;
unsigned int i;
@@ -214,7 +204,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* This means that the dst memory must be one semiblock larger than src.
* Also ensure that the given data is aligned to semiblock.
*/
if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/*
@@ -225,26 +215,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, lsrc points to src and ldst to dst. For any
* subsequent round, the code operates on dst only.
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
lsrc = src;
ldst = dst;
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int tmp_nbytes = nbytes;
unsigned int nbytes = req->cryptlen;
scatterwalk_start(&src_walk, lsrc);
scatterwalk_start(&dst_walk, ldst);
scatterwalk_start(&src_walk, src);
scatterwalk_start(&dst_walk, dst);
while (tmp_nbytes) {
while (nbytes) {
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
crypto_cipher_encrypt_one(child, (u8 *)&block,
crypto_cipher_encrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
@@ -254,117 +244,59 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
lsrc = dst;
ldst = dst;
src = req->dst;
dst = req->dst;
}
/* establish the IV for the caller to pick up */
memcpy(desc->info, &block.A, SEMIBSIZE);
memcpy(req->iv, &block.A, SEMIBSIZE);
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
static int crypto_kw_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_kw_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb)
{
struct crypto_instance *inst = NULL;
struct crypto_alg *alg = NULL;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
err = -EINVAL;
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
goto err;
goto out_free_inst;
inst = crypto_alloc_instance("kw", alg);
if (IS_ERR(inst))
goto err;
inst->alg.base.cra_blocksize = SEMIBSIZE;
inst->alg.base.cra_alignmask = 0;
inst->alg.ivsize = SEMIBSIZE;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = SEMIBSIZE;
inst->alg.cra_alignmask = 0;
inst->alg.cra_type = &crypto_blkcipher_type;
inst->alg.cra_blkcipher.ivsize = SEMIBSIZE;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.encrypt = crypto_kw_encrypt;
inst->alg.decrypt = crypto_kw_decrypt;
inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx);
err = skcipher_register_instance(tmpl, inst);
if (err)
goto out_free_inst;
goto out_put_alg;
inst->alg.cra_init = crypto_kw_init_tfm;
inst->alg.cra_exit = crypto_kw_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_kw_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt;
err:
out_free_inst:
inst->free(inst);
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_kw_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
return err;
}
static struct crypto_template crypto_kw_tmpl = {
.name = "kw",
.alloc = crypto_kw_alloc,
.free = crypto_kw_free,
.create = crypto_kw_create,
.module = THIS_MODULE,
};


@@ -1,13 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The MORUS-1280 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/unaligned.h>
@@ -366,18 +362,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
const struct morus1280_ops *ops)
{
struct skcipher_walk walk;
u8 *dst;
const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
unsigned int nbytes = walk.nbytes;
ops->crypt_chunk(state, dst, src, walk.nbytes);
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
skcipher_walk_done(&walk, 0);
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes);
skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}
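The fix above matters when the walk hands back a buffer whose length is not a multiple of the algorithm's stride while more data still follows: only the very last chunk may be partial. The rewritten loop, annotated in its generic form:

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* Mid-stream chunks must be stride-aligned; only the final
		 * chunk of the request may be a partial block. */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		ops->crypt_chunk(state, walk.dst.virt.addr,
				 walk.src.virt.addr, nbytes);

		/* Return the unprocessed tail so the walk re-presents it,
		 * merged with the next scatterlist segment. */
		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}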


@@ -1,13 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The MORUS-640 Authenticated-Encryption Algorithm
*
* Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <asm/unaligned.h>
@@ -365,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
const struct morus640_ops *ops)
{
struct skcipher_walk walk;
u8 *dst;
const u8 *src;
ops->skcipher_walk_init(&walk, req, false);
while (walk.nbytes) {
src = walk.src.virt.addr;
dst = walk.dst.virt.addr;
unsigned int nbytes = walk.nbytes;
ops->crypt_chunk(state, dst, src, walk.nbytes);
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
skcipher_walk_done(&walk, 0);
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes);
skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
}


@@ -5,9 +5,6 @@
*
* Copyright (C) 2018 ARM Limited or its affiliates.
* All rights reserved.
*
* Based loosely on public domain code gleaned from libtomcrypt
* (https://github.com/libtom/libtomcrypt).
*/
#include <crypto/algapi.h>
@@ -16,189 +13,70 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_ofb_ctx {
struct crypto_cipher *child;
int cnt;
};
static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
static int crypto_ofb_crypt(struct skcipher_request *req)
{
struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
err = skcipher_walk_virt(&walk, req, false);
static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
int nbytes = walk->nbytes;
while (walk.nbytes >= bsize) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
u8 * const iv = walk.iv;
unsigned int nbytes = walk.nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
crypto_cipher_encrypt_one(cipher, iv, iv);
crypto_xor_cpy(dst, src, iv, bsize);
dst += bsize;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
do {
if (ctx->cnt == bsize) {
if (nbytes < bsize)
break;
crypto_cipher_encrypt_one(tfm, iv, iv);
ctx->cnt = 0;
}
*dst = *src ^ iv[ctx->cnt];
src++;
dst++;
ctx->cnt++;
} while (--nbytes);
return nbytes;
}
static int crypto_ofb_encrypt(struct skcipher_request *req)
{
struct skcipher_walk walk;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
unsigned int bsize;
struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
int ret = 0;
bsize = crypto_cipher_blocksize(child);
ctx->cnt = bsize;
ret = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
ret = skcipher_walk_done(&walk, ret);
err = skcipher_walk_done(&walk, nbytes);
}
return ret;
}
/* OFB encrypt and decrypt are identical */
static int crypto_ofb_decrypt(struct skcipher_request *req)
{
return crypto_ofb_encrypt(req);
}
static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static void crypto_ofb_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
if (walk.nbytes) {
crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct crypto_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
if (err)
return err;
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
/* OFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
goto err_free_inst;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
err = PTR_ERR(alg);
if (IS_ERR(alg))
goto err_free_inst;
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
crypto_mod_put(alg);
if (err)
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
if (err)
goto err_drop_spawn;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
/* We access the data as u32s when xoring. */
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.ivsize = alg->cra_blocksize;
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);
inst->alg.init = crypto_ofb_init_tfm;
inst->alg.exit = crypto_ofb_exit_tfm;
inst->alg.setkey = crypto_ofb_setkey;
inst->alg.encrypt = crypto_ofb_encrypt;
inst->alg.decrypt = crypto_ofb_decrypt;
inst->free = crypto_ofb_free;
inst->alg.encrypt = crypto_ofb_crypt;
inst->alg.decrypt = crypto_ofb_crypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
inst->free(inst);
out:
crypto_mod_put(alg);
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_free_inst:
kfree(inst);
goto out;
}
static struct crypto_template crypto_ofb_tmpl = {
@@ -221,5 +99,5 @@ module_init(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OFB block cipher algorithm");
MODULE_DESCRIPTION("OFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ofb");


@@ -20,28 +20,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
struct crypto_pcbc_ctx {
struct crypto_cipher *child;
};
static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct skcipher_walk *walk,
@@ -51,7 +29,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
@@ -72,7 +50,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
@@ -84,16 +62,13 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -103,10 +78,10 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
child);
cipher);
else
nbytes = crypto_pcbc_encrypt_segment(req, &walk,
child);
cipher);
err = skcipher_walk_done(&walk, nbytes);
}
@@ -121,7 +96,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
do {
crypto_cipher_decrypt_one(tfm, dst, src);
@@ -132,8 +107,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
@@ -144,7 +117,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
@@ -156,16 +129,13 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_pcbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
@@ -175,117 +145,34 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
child);
cipher);
else
nbytes = crypto_pcbc_decrypt_segment(req, &walk,
child);
cipher);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static void crypto_pcbc_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_attr_type *algt;
struct crypto_spawn *spawn;
struct crypto_alg *alg;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
if (IS_ERR(inst))
return PTR_ERR(inst);
if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) &
~CRYPTO_ALG_INTERNAL)
return -EINVAL;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER |
(algt->type & CRYPTO_ALG_INTERNAL),
CRYPTO_ALG_TYPE_MASK |
(algt->mask & CRYPTO_ALG_INTERNAL));
err = PTR_ERR(alg);
if (IS_ERR(alg))
goto err_free_inst;
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
if (err)
goto err_drop_spawn;
inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.ivsize = alg->cra_blocksize;
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
inst->alg.init = crypto_pcbc_init_tfm;
inst->alg.exit = crypto_pcbc_exit_tfm;
inst->alg.setkey = crypto_pcbc_setkey;
inst->alg.encrypt = crypto_pcbc_encrypt;
inst->alg.decrypt = crypto_pcbc_decrypt;
inst->free = crypto_pcbc_free;
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
inst->free(inst);
crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
err_put_alg:
crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
}
static struct crypto_template crypto_pcbc_tmpl = {
@@ -308,5 +195,5 @@ module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher algorithm");
MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");


@@ -12,6 +12,7 @@
#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>


@@ -89,13 +89,12 @@ static int seqiv_aead_encrypt(struct aead_request *req)
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
info = kmalloc(ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
GFP_ATOMIC);
info = kmemdup(req->iv, ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC);
if (!info)
return -ENOMEM;
memcpy(info, req->iv, ivsize);
compl = seqiv_aead_encrypt_complete;
data = req;
}
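The kmalloc()+memcpy() pair collapses into kmemdup(), which allocates and copies in one step; the general idiom:

	/* before */
	p = kmalloc(len, gfp);
	if (!p)
		return -ENOMEM;
	memcpy(p, src, len);

	/* after */
	p = kmemdup(src, len, gfp);
	if (!p)
		return -ENOMEM;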


@@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
return err;
}
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
if (crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
else
err = shash->setkey(tfm, key, keylen);
if (err)
if (unlikely(err)) {
shash_set_needkey(tfm, shash);
return err;
}
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
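The re-armed CRYPTO_TFM_NEED_KEY flag is what turns a failed setkey into a hard error on later use. Illustratively (cmac(aes) chosen here as an example of a keyed shash with strict key-length rules; variables are placeholders):

	err = crypto_shash_setkey(tfm, key, 5);	/* bad AES key length */
	if (err) {
		/* tfm is flagged NEED_KEY again, so operations fail with
		 * -ENOKEY instead of running with the stale key. */
		err = crypto_shash_digest(desc, data, len, out);
	}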
@@ -373,15 +382,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
if (crypto_shash_alg_has_setkey(alg))
crt->setkey = shash_async_setkey;
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
if (alg->export)
crt->export = shash_async_export;
if (alg->import)
crt->import = shash_async_import;
crt->export = shash_async_export;
crt->import = shash_async_import;
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
@@ -395,9 +403,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
hash->descsize = alg->descsize;
if (crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
shash_set_needkey(hash, alg);
return 0;
}
@@ -464,6 +470,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
alg->statesize > HASH_MAX_STATESIZE)
return -EINVAL;
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;


@@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
return crypto_alg_extsize(alg);
}
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
if (tfm->keysize)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
if (err)
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
}
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
if (skcipher->keysize)
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
skcipher_set_needkey(skcipher);
return 0;
}
@@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
if (err)
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
}
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
if (skcipher->keysize)
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
skcipher_set_needkey(skcipher);
return 0;
}
@@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
else
err = cipher->setkey(tfm, key, keylen);
if (err)
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
}
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
@@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
if (skcipher->keysize)
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
skcipher_set_needkey(skcipher);
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
@@ -1058,5 +1067,136 @@ int skcipher_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
int err;
crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(cipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->cipher = cipher;
return 0;
}
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->cipher);
}
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
crypto_drop_spawn(skcipher_instance_ctx(inst));
kfree(inst);
}
/**
* skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
*
* Allocate an skcipher_instance for a simple block cipher mode of operation,
* e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
* that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
* alignmask, and priority are set from the underlying cipher but can be
* overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
* default ->setkey(), ->init(), and ->exit() methods are installed.
*
* @tmpl: the template being instantiated
* @tb: the template parameters
* @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
* returned here. It must be dropped with crypto_mod_put().
*
* Return: a pointer to the new instance, or an ERR_PTR(). The caller still
* needs to register the instance.
*/
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
struct crypto_alg **cipher_alg_ret)
{
struct crypto_attr_type *algt;
struct crypto_alg *cipher_alg;
struct skcipher_instance *inst;
struct crypto_spawn *spawn;
u32 mask;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
return ERR_PTR(-EINVAL);
mask = CRYPTO_ALG_TYPE_MASK |
crypto_requires_off(algt->type, algt->mask,
CRYPTO_ALG_NEED_FALLBACK);
cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
if (IS_ERR(cipher_alg))
return ERR_CAST(cipher_alg);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst) {
err = -ENOMEM;
goto err_put_cipher_alg;
}
spawn = skcipher_instance_ctx(inst);
err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
cipher_alg);
if (err)
goto err_free_inst;
err = crypto_init_spawn(spawn, cipher_alg,
skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
if (err)
goto err_free_inst;
inst->free = skcipher_free_instance_simple;
/* Default algorithm properties, can be overridden */
inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
inst->alg.base.cra_priority = cipher_alg->cra_priority;
inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
inst->alg.ivsize = cipher_alg->cra_blocksize;
/* Use skcipher_ctx_simple by default, can be overridden */
inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
inst->alg.setkey = skcipher_setkey_simple;
inst->alg.init = skcipher_init_tfm_simple;
inst->alg.exit = skcipher_exit_tfm_simple;
*cipher_alg_ret = cipher_alg;
return inst;
err_free_inst:
kfree(inst);
err_put_cipher_alg:
crypto_mod_put(cipher_alg);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");


@@ -960,7 +960,7 @@ static int streebog_init(struct shash_desc *desc)
memset(ctx, 0, sizeof(struct streebog_state));
for (i = 0; i < 8; i++) {
if (digest_size == STREEBOG256_DIGEST_SIZE)
ctx->h.qword[i] = 0x0101010101010101ULL;
ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL);
}
return 0;
}
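A side note on this hunk (an inference from the constant, not from the commit message): 0x0101010101010101 is invariant under byte swapping, so the stored value is identical on any endianness; the cpu_to_le64() makes the expression's type match the __le64 destination. A compile-time check of the symmetry:

	/* Byte-symmetric constant: swab64() is a no-op on it. */
	BUILD_BUG_ON(swab64(0x0101010101010101ULL) != 0x0101010101010101ULL);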

Diff content was not shown because it is too large.

Diff content was not shown because it is too large.


@@ -25,8 +25,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20
@@ -468,10 +469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
u64 a, b, c, aa, bb, cc;
u64 x[8];
int i;
const __le64 *ptr = (const __le64 *)data;
for (i = 0; i < 8; i++)
x[i] = le64_to_cpu(ptr[i]);
x[i] = get_unaligned_le64(data + i * sizeof(__le64));
/* save */
a = aa = tctx->a;