// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <[email protected]>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include <crypto/xts.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);
asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks);
asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);
asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				       int rounds, int bytes, u8 const iv[]);
asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				   int rounds, int blocks, u8 ctr[]);
asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				   int rounds, int bytes, u8 iv[],
				   u32 const rk2[], int first);
struct aes_block {
	u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}
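
/*
 * Expand the user-supplied key into the round key schedule, using the
 * ce_aes_sub() assembler helper for the SubWord step of the key schedule.
 * The decryption schedule for the Equivalent Inverse Cipher is derived
 * afterwards via ce_aes_invert(), so both key_enc and key_dec end up
 * populated.
 */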
static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	ctx->key_length = key_len;
	for (i = 0; i < kwords; i++)
		ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}
static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}
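
/*
 * XTS uses two independent AES keys: key1 encrypts the data blocks and
 * key2 encrypts the tweak. Both expanded schedules live in the transform
 * context, and xts_verify_key() rejects keys where the two halves are
 * identical.
 */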
struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				       key_len / 2);
	return ret;
}
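
/*
 * All modes below follow the same pattern: walk the request's
 * scatterlists with the skcipher walk API, and hand each batch of
 * contiguous full blocks to the assembler routines. The
 * kernel_neon_begin()/kernel_neon_end() brackets are kept as narrow as
 * possible so the NEON register file is not monopolised for longer than
 * necessary.
 */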
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int blocks;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}
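
/*
 * CBC is split into a reusable *_walk helper and a thin wrapper, so
 * that the CTS implementation below can drive the same code over a
 * sub-request covering only the full CBC blocks.
 */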
static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int blocks;
	int err = 0;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				   ctx->key_dec, num_rounds(ctx), blocks,
				   walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}
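
/*
 * CBC with ciphertext stealing (CTS): all but the last two blocks are
 * handled as regular CBC via a sub-request; the final two (possibly
 * partial) blocks are then handed to the dedicated assembler routine,
 * which performs the block reordering required by the CS3 variant that
 * the kernel's cts template implements.
 */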
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_encrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_enc, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}
static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_decrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->key_dec, num_rounds(ctx), walk.nbytes,
			       walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}
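
/*
 * CTR mode: whole blocks are handled by the assembler routine directly;
 * a trailing partial block is encrypted by generating one block of
 * keystream into a stack buffer (blocks == -1 requests this) and XORing
 * it into the data with crypto_xor_cpy().
 */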
static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err, blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key_enc, num_rounds(ctx), blocks,
				   walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	if (walk.nbytes) {
		u8 __aligned(8) tail[AES_BLOCK_SIZE];
		unsigned int nbytes = walk.nbytes;
		u8 *tdst = walk.dst.virt.addr;
		u8 *tsrc = walk.src.virt.addr;

		/*
		 * Tell ce_aes_ctr_encrypt() to process a tail block.
		 */
		blocks = -1;

		kernel_neon_begin();
		ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
				   blocks, walk.iv);
		kernel_neon_end();
		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}
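
/*
 * Synchronous fallback path: when the NEON unit may not be used,
 * crypto_ctr_encrypt_walk() drives the generic aes_encrypt() library
 * routine one block at a time. Interrupts are disabled around each
 * block to reduce the exposure of the table-based C code to
 * cache-timing attacks.
 */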
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned long flags;

	/*
	 * Temporarily disable interrupts to avoid races where
	 * cachelines are evicted when the CPU is interrupted
	 * to do something else.
	 */
	local_irq_save(flags);
	aes_encrypt(ctx, dst, src);
	local_irq_restore(flags);
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
	if (!crypto_simd_usable())
		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

	return ctr_encrypt(req);
}
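
/*
 * XTS: if the length is not a multiple of the block size, the last two
 * blocks are split off into a separate pass so that the assembler code
 * can perform ciphertext stealing on them; everything before that point
 * is processed as whole blocks. Requests shorter than one block are
 * rejected, since XTS is undefined for them. Note that the tweak is
 * always encrypted with key2's encryption schedule, even on the decrypt
 * path.
 */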
static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_enc, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_enc, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}
static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = num_rounds(&ctx->key1);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   ctx->key1.key_dec, rounds, nbytes, walk.iv,
				   ctx->key2.key_enc, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	kernel_neon_begin();
	ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			   ctx->key1.key_dec, rounds, walk.nbytes, walk.iv,
			   ctx->key2.key_enc, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}
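
/*
 * The "__" prefixed algorithms are internal-only (CRYPTO_ALG_INTERNAL):
 * they assume the NEON unit is usable and are exposed to callers via
 * the simd wrappers created in aes_init() below. The exception is the
 * sync "ctr(aes)" variant, which can fall back to the C implementation
 * and is therefore registered directly, at a slightly lower priority.
 */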
static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "__ecb(aes)",
	.base.cra_driver_name	= "__ecb-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "__cbc(aes)",
	.base.cra_driver_name	= "__cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "__cts(cbc(aes))",
	.base.cra_driver_name	= "__cts-cbc-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= cts_cbc_encrypt,
	.decrypt		= cts_cbc_decrypt,
}, {
	.base.cra_name		= "__ctr(aes)",
	.base.cra_driver_name	= "__ctr-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-ce-sync",
	.base.cra_priority	= 300 - 1,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.setkey			= ce_aes_setkey,
	.encrypt		= ctr_encrypt_sync,
	.decrypt		= ctr_encrypt_sync,
}, {
	.base.cra_name		= "__xts(aes)",
	.base.cra_driver_name	= "__xts-aes-ce",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct crypto_aes_xts_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.walksize		= 2 * AES_BLOCK_SIZE,
	.setkey			= xts_set_key,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
		simd_skcipher_free(aes_simd_algs[i]);
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
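
/*
 * Register the base algorithms, then create an async simd wrapper for
 * each internal one. The wrapper names skip the "__" prefix, so e.g.
 * "__cbc-aes-ce" becomes the user-visible "cbc-aes-ce".
 */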
static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

		algname = aes_algs[i].base.cra_name + 2;
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}

	return 0;

unregister_simds:
	aes_exit();
	return err;
}
module_cpu_feature_match(AES, aes_init);
module_exit(aes_exit);
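
/*
 * Illustrative usage sketch (not part of this driver): callers reach
 * these implementations through the generic skcipher API rather than by
 * calling into this module directly. A minimal, hedged example of
 * one-block CTR encryption; error handling and async completion (see
 * crypto_wait_req()) are elided for brevity:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	u8 key[AES_KEYSIZE_128] = { 0 };
 *	u8 iv[AES_BLOCK_SIZE] = { 0 };
 *	u8 buf[AES_BLOCK_SIZE] = { 0 };
 *
 *	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
 *	crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */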