Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 3.15:

   - Added 3DES driver for OMAP4/AM43xx
   - Added AVX2 acceleration for SHA
   - Added hash-only AEAD algorithms in caam
   - Removed tegra driver as it is not functioning and the hardware is too slow
   - Allow blkcipher walks over AEAD (needed for ARM)
   - Fixed unprotected FPU/SSE access in ghash-clmulni-intel
   - Fixed highmem crash in omap-sham
   - Add (zero entropy) randomness when initialising hardware RNGs
   - Fixed unaligned ahash completion functions
   - Added soft module dependency for crc32c for initrds that use crc32c"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (60 commits)
  crypto: ghash-clmulni-intel - use C implementation for setkey()
  crypto: x86/sha1 - reduce size of the AVX2 asm implementation
  crypto: x86/sha1 - fix stack alignment of AVX2 variant
  crypto: x86/sha1 - re-enable the AVX variant
  crypto: sha - SHA1 transform x86_64 AVX2
  crypto: crypto_wq - Fix late crypto work queue initialization
  crypto: caam - add missing key_dma unmap
  crypto: caam - add support for aead null encryption
  crypto: testmgr - add aead null encryption test vectors
  crypto: export NULL algorithms defines
  crypto: caam - remove error propagation handling
  crypto: hash - Simplify the ahash_finup implementation
  crypto: hash - Pull out the functions to save/restore request
  crypto: hash - Fix the pointer voodoo in unaligned ahash
  crypto: caam - Fix first parameter to caam_init_rng
  crypto: omap-sham - Map SG pages if they are HIGHMEM before accessing
  crypto: caam - Dynamic memory allocation for caam_rng_ctx object
  crypto: allow blkcipher walks over AEAD data
  crypto: remove direct blkcipher_walk dependency on transform
  hwrng: add randomness to system from rng sources
  ...
@@ -262,6 +262,17 @@ config CRYPTO_DEV_OMAP_AES
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.

config CRYPTO_DEV_OMAP_DES
tristate "Support for OMAP DES3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_DES
select CRYPTO_BLKCIPHER2
help
OMAP processors have DES/3DES module accelerator. Select this if you
want to use the OMAP module for DES and 3DES algorithms. Currently
the ECB and CBC modes of operation supported by the driver. Also
accesses made on unaligned boundaries are also supported.

config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
depends on ARCH_PICOXCELL && HAVE_CLK
@@ -300,17 +311,6 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.

config CRYPTO_DEV_TEGRA_AES
tristate "Support for TEGRA AES hw engine"
depends on ARCH_TEGRA
select CRYPTO_AES
help
TEGRA processors have AES module accelerator. Select this if you
want to use the TEGRA module for AES algorithms.

To compile this driver as a module, choose M here: the module
will be called tegra-aes.

config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO

@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
@@ -21,5 +22,4 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/

@@ -139,7 +139,6 @@ static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
/* setup CRC interrupts */
crc->regs->status = CMPERRI | DCNTEXPI;
crc->regs->intrenset = CMPERRI | DCNTEXPI;
SSYNC();

return 0;
}
@@ -285,17 +284,12 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
if (i == 0)
return;

flush_dcache_range((unsigned int)crc->sg_cpu,
(unsigned int)crc->sg_cpu +
i * sizeof(struct dma_desc_array));

/* Set the last descriptor to stop mode */
crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
crc->sg_cpu[i - 1].cfg |= DI_EN;
set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
set_dma_x_count(crc->dma_ch, 0);
set_dma_x_modify(crc->dma_ch, 0);
SSYNC();
set_dma_config(crc->dma_ch, dma_config);
}

@@ -415,7 +409,6 @@ finish_update:

/* finally kick off CRC operation */
crc->regs->control |= BLKEN;
SSYNC();

return -EINPROGRESS;
}
@@ -539,7 +532,6 @@ static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)

if (crc->regs->status & DCNTEXP) {
crc->regs->status = DCNTEXP;
SSYNC();

/* prepare results */
put_unaligned_le32(crc->regs->result, crc->req->result);
@@ -594,7 +586,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
unsigned int timeout = 100000;
int ret;

crc = kzalloc(sizeof(*crc), GFP_KERNEL);
crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
if (!crc) {
dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
return -ENOMEM;
@@ -610,42 +602,39 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
ret = -ENOENT;
goto out_error_free_mem;
return -ENOENT;
}

crc->regs = ioremap(res->start, resource_size(res));
if (!crc->regs) {
crc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR((void *)crc->regs)) {
dev_err(&pdev->dev, "Cannot map CRC IO\n");
ret = -ENXIO;
goto out_error_free_mem;
return PTR_ERR((void *)crc->regs);
}

crc->irq = platform_get_irq(pdev, 0);
if (crc->irq < 0) {
dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
ret = -ENOENT;
goto out_error_unmap;
return -ENOENT;
}

ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
IRQF_SHARED, dev_name(dev), crc);
if (ret) {
dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
goto out_error_unmap;
return ret;
}

res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No CRC DMA channel specified\n");
ret = -ENOENT;
goto out_error_irq;
return -ENOENT;
}
crc->dma_ch = res->start;

ret = request_dma(crc->dma_ch, dev_name(dev));
if (ret) {
dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
goto out_error_irq;
return ret;
}

crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
@@ -660,9 +649,7 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));

crc->regs->control = 0;
SSYNC();
crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
SSYNC();

while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
cpu_relax();
@@ -693,12 +680,6 @@ out_error_dma:
if (crc->sg_cpu)
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
free_dma(crc->dma_ch);
out_error_irq:
free_irq(crc->irq, crc);
out_error_unmap:
iounmap((void *)crc->regs);
out_error_free_mem:
kfree(crc);

return ret;
}
@@ -721,10 +702,6 @@ static int bfin_crypto_crc_remove(struct platform_device *pdev)
crypto_unregister_ahash(&algs);
tasklet_kill(&crc->done_task);
free_dma(crc->dma_ch);
if (crc->irq > 0)
free_irq(crc->irq, crc);
iounmap((void *)crc->regs);
kfree(crc);

return 0;
}

@@ -66,10 +66,14 @@
|
||||
|
||||
/* length of descriptors text */
|
||||
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
|
||||
|
||||
#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
|
||||
#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
|
||||
|
||||
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
|
||||
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
|
||||
20 * CAAM_CMD_SZ)
|
||||
@@ -103,28 +107,15 @@ static inline void append_dec_op1(u32 *desc, u32 type)
|
||||
set_jump_tgt_here(desc, uncond_jump_cmd);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for completion of class 1 key loading before allowing
|
||||
* error propagation
|
||||
*/
|
||||
static inline void append_dec_shr_done(u32 *desc)
|
||||
{
|
||||
u32 *jump_cmd;
|
||||
|
||||
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
|
||||
set_jump_tgt_here(desc, jump_cmd);
|
||||
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
|
||||
}
|
||||
|
||||
/*
|
||||
* For aead functions, read payload and write payload,
|
||||
* both of which are specified in req->src and req->dst
|
||||
*/
|
||||
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
|
||||
{
|
||||
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
|
||||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
|
||||
KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
|
||||
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -211,9 +202,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
|
||||
append_key_aead(desc, ctx, keys_fit_inline);
|
||||
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
}
|
||||
|
||||
/* Propagate errors from shared to job descriptor */
|
||||
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
|
||||
static int aead_null_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct aead_tfm *tfm = &aead->base.crt_aead;
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool keys_fit_inline = false;
|
||||
u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
|
||||
u32 *desc;
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptors
|
||||
* must all fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
|
||||
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
|
||||
keys_fit_inline = true;
|
||||
|
||||
/* aead_encrypt shared descriptor */
|
||||
desc = ctx->sh_desc_enc;
|
||||
|
||||
init_sh_desc(desc, HDR_SHARE_SERIAL);
|
||||
|
||||
/* Skip if already shared */
|
||||
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_SHRD);
|
||||
if (keys_fit_inline)
|
||||
append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
|
||||
ctx->split_key_len, CLASS_2 |
|
||||
KEY_DEST_MDHA_SPLIT | KEY_ENC);
|
||||
else
|
||||
append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
|
||||
KEY_DEST_MDHA_SPLIT | KEY_ENC);
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
|
||||
/* cryptlen = seqoutlen - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
|
||||
|
||||
/*
|
||||
* NULL encryption; IV is zero
|
||||
* assoclen = (assoclen + cryptlen) - cryptlen
|
||||
*/
|
||||
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* read assoc before reading payload */
|
||||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
|
||||
KEY_VLF);
|
||||
|
||||
/* Prepare to read and write cryptlen bytes */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/*
|
||||
* MOVE_LEN opcode is not available in all SEC HW revisions,
|
||||
* thus need to do some magic, i.e. self-patch the descriptor
|
||||
* buffer.
|
||||
*/
|
||||
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
|
||||
MOVE_DEST_MATH3 |
|
||||
(0x6 << MOVE_LEN_SHIFT));
|
||||
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
|
||||
MOVE_DEST_DESCBUF |
|
||||
MOVE_WAITCOMP |
|
||||
(0x8 << MOVE_LEN_SHIFT));
|
||||
|
||||
/* Class 2 operation */
|
||||
append_operation(desc, ctx->class2_alg_type |
|
||||
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
|
||||
|
||||
/* Read and write cryptlen bytes */
|
||||
aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
|
||||
|
||||
set_move_tgt_here(desc, read_move_cmd);
|
||||
set_move_tgt_here(desc, write_move_cmd);
|
||||
append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
|
||||
append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
|
||||
MOVE_AUX_LS);
|
||||
|
||||
/* Write ICV */
|
||||
append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
|
||||
LDST_SRCDST_BYTE_CONTEXT);
|
||||
|
||||
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"aead null enc shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptors
|
||||
* must all fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
|
||||
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
|
||||
keys_fit_inline = true;
|
||||
|
||||
desc = ctx->sh_desc_dec;
|
||||
|
||||
/* aead_decrypt shared descriptor */
|
||||
init_sh_desc(desc, HDR_SHARE_SERIAL);
|
||||
|
||||
/* Skip if already shared */
|
||||
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_SHRD);
|
||||
if (keys_fit_inline)
|
||||
append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
|
||||
ctx->split_key_len, CLASS_2 |
|
||||
KEY_DEST_MDHA_SPLIT | KEY_ENC);
|
||||
else
|
||||
append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
|
||||
KEY_DEST_MDHA_SPLIT | KEY_ENC);
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
|
||||
/* Class 2 operation */
|
||||
append_operation(desc, ctx->class2_alg_type |
|
||||
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
|
||||
|
||||
/* assoclen + cryptlen = seqinlen - ivsize - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
|
||||
ctx->authsize + tfm->ivsize);
|
||||
/* assoclen = (assoclen + cryptlen) - cryptlen */
|
||||
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
|
||||
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
|
||||
|
||||
/* read assoc before reading payload */
|
||||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
|
||||
KEY_VLF);
|
||||
|
||||
/* Prepare to read and write cryptlen bytes */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
|
||||
|
||||
/*
|
||||
* MOVE_LEN opcode is not available in all SEC HW revisions,
|
||||
* thus need to do some magic, i.e. self-patch the descriptor
|
||||
* buffer.
|
||||
*/
|
||||
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
|
||||
MOVE_DEST_MATH2 |
|
||||
(0x6 << MOVE_LEN_SHIFT));
|
||||
write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
|
||||
MOVE_DEST_DESCBUF |
|
||||
MOVE_WAITCOMP |
|
||||
(0x8 << MOVE_LEN_SHIFT));
|
||||
|
||||
/* Read and write cryptlen bytes */
|
||||
aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
|
||||
|
||||
/*
|
||||
* Insert a NOP here, since we need at least 4 instructions between
|
||||
* code patching the descriptor buffer and the location being patched.
|
||||
*/
|
||||
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
|
||||
set_jump_tgt_here(desc, jump_cmd);
|
||||
|
||||
set_move_tgt_here(desc, read_move_cmd);
|
||||
set_move_tgt_here(desc, write_move_cmd);
|
||||
append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
|
||||
append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
|
||||
MOVE_AUX_LS);
|
||||
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
|
||||
|
||||
/* Load ICV */
|
||||
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
|
||||
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
|
||||
|
||||
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"aead null dec shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aead_set_sh_desc(struct crypto_aead *aead)
|
||||
@@ -222,13 +400,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
bool keys_fit_inline = false;
|
||||
u32 *key_jump_cmd, *jump_cmd;
|
||||
u32 geniv, moveiv;
|
||||
u32 *desc;
|
||||
|
||||
if (!ctx->enckeylen || !ctx->authsize)
|
||||
if (!ctx->authsize)
|
||||
return 0;
|
||||
|
||||
/* NULL encryption / decryption */
|
||||
if (!ctx->enckeylen)
|
||||
return aead_null_set_sh_desc(aead);
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptors
|
||||
* must all fit into the 64-word Descriptor h/w Buffer
|
||||
@@ -253,7 +434,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
||||
/* assoclen + cryptlen = seqinlen - ivsize */
|
||||
append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
|
||||
|
||||
/* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
|
||||
/* assoclen = (assoclen + cryptlen) - cryptlen */
|
||||
append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* read assoc before reading payload */
|
||||
@@ -296,30 +477,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
||||
CAAM_DESC_BYTES_MAX)
|
||||
keys_fit_inline = true;
|
||||
|
||||
/* aead_decrypt shared descriptor */
|
||||
desc = ctx->sh_desc_dec;
|
||||
|
||||
/* aead_decrypt shared descriptor */
|
||||
init_sh_desc(desc, HDR_SHARE_SERIAL);
|
||||
|
||||
/* Skip if already shared */
|
||||
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_SHRD);
|
||||
|
||||
append_key_aead(desc, ctx, keys_fit_inline);
|
||||
|
||||
/* Only propagate error immediately if shared */
|
||||
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
|
||||
set_jump_tgt_here(desc, jump_cmd);
|
||||
init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
|
||||
|
||||
/* Class 2 operation */
|
||||
append_operation(desc, ctx->class2_alg_type |
|
||||
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
|
||||
|
||||
/* assoclen + cryptlen = seqinlen - ivsize */
|
||||
/* assoclen + cryptlen = seqinlen - ivsize - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
|
||||
ctx->authsize + tfm->ivsize)
|
||||
ctx->authsize + tfm->ivsize);
|
||||
/* assoclen = (assoclen + cryptlen) - cryptlen */
|
||||
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
|
||||
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
|
||||
@@ -340,7 +509,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
||||
/* Load ICV */
|
||||
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
|
||||
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
|
||||
append_dec_shr_done(desc);
|
||||
|
||||
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
@@ -532,7 +700,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
int ret = 0;
|
||||
u32 *key_jump_cmd, *jump_cmd;
|
||||
u32 *key_jump_cmd;
|
||||
u32 *desc;
|
||||
|
||||
#ifdef DEBUG
|
||||
@@ -563,9 +731,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
|
||||
/* Propagate errors from shared to job descriptor */
|
||||
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
|
||||
|
||||
/* Load iv */
|
||||
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
|
||||
LDST_CLASS_1_CCB | tfm->ivsize);
|
||||
@@ -603,11 +768,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
ctx->enckeylen, CLASS_1 |
|
||||
KEY_DEST_CLASS_REG);
|
||||
|
||||
/* For aead, only propagate error immediately if shared */
|
||||
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
|
||||
set_jump_tgt_here(desc, key_jump_cmd);
|
||||
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
|
||||
set_jump_tgt_here(desc, jump_cmd);
|
||||
|
||||
/* load IV */
|
||||
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
|
||||
@@ -619,9 +780,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
/* Perform operation */
|
||||
ablkcipher_append_src_dst(desc);
|
||||
|
||||
/* Wait for key to load before allowing propagating error */
|
||||
append_dec_shr_done(desc);
|
||||
|
||||
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
@@ -1459,6 +1617,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
|
||||
{
|
||||
return aead_encrypt(&areq->areq);
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate and map the ablkcipher extended descriptor for ablkcipher
|
||||
*/
|
||||
@@ -1647,6 +1810,124 @@ struct caam_alg_template {
|
||||
|
||||
static struct caam_alg_template driver_algs[] = {
|
||||
/* single-pass ipsec_esp descriptor */
|
||||
{
|
||||
.name = "authenc(hmac(md5),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(sha1),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(sha224),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
|
||||
OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(sha256),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
|
||||
OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(sha384),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
|
||||
OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(sha512),ecb(cipher_null))",
|
||||
.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
|
||||
.blocksize = NULL_BLOCK_SIZE,
|
||||
.type = CRYPTO_ALG_TYPE_AEAD,
|
||||
.template_aead = {
|
||||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.givencrypt = aead_null_givencrypt,
|
||||
.geniv = "<built-in>",
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.maxauthsize = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
.class1_alg_type = 0,
|
||||
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
|
||||
OP_ALG_AAI_HMAC_PRECOMP,
|
||||
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
|
||||
},
|
||||
{
|
||||
.name = "authenc(hmac(md5),cbc(aes))",
|
||||
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
|
||||
@@ -2099,6 +2380,11 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
|
||||
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
|
||||
desc_bytes(ctx->sh_desc_givenc),
|
||||
DMA_TO_DEVICE);
|
||||
if (ctx->key_dma &&
|
||||
!dma_mapping_error(ctx->jrdev, ctx->key_dma))
|
||||
dma_unmap_single(ctx->jrdev, ctx->key_dma,
|
||||
ctx->enckeylen + ctx->split_key_pad_len,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
caam_jr_free(ctx->jrdev);
|
||||
}
|
||||
|
@@ -76,7 +76,7 @@ struct caam_rng_ctx {
struct buf_data bufs[2];
};

static struct caam_rng_ctx rng_ctx;
static struct caam_rng_ctx *rng_ctx;

static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
@@ -137,7 +137,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)

static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct caam_rng_ctx *ctx = &rng_ctx;
struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
@@ -237,12 +237,12 @@ static void caam_cleanup(struct hwrng *rng)
struct buf_data *bd;

for (i = 0; i < 2; i++) {
bd = &rng_ctx.bufs[i];
bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}

rng_unmap_ctx(&rng_ctx);
rng_unmap_ctx(rng_ctx);
}

static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
@@ -273,8 +273,9 @@ static struct hwrng caam_rng = {

static void __exit caam_rng_exit(void)
{
caam_jr_free(rng_ctx.jrdev);
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
}

static int __init caam_rng_init(void)
@@ -286,8 +287,10 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}

caam_init_rng(&rng_ctx, dev);
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
if (!rng_ctx)
return -ENOMEM;
caam_init_rng(rng_ctx, dev);

dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);

@@ -26,6 +26,7 @@
#include <net/xfrm.h>

#include <crypto/algapi.h>
#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>

@@ -14,7 +14,6 @@
|
||||
#include "jr.h"
|
||||
#include "desc_constr.h"
|
||||
#include "error.h"
|
||||
#include "ctrl.h"
|
||||
|
||||
/*
|
||||
* Descriptor to instantiate RNG State Handle 0 in normal mode and
|
||||
@@ -352,32 +351,17 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
|
||||
|
||||
/**
|
||||
* caam_get_era() - Return the ERA of the SEC on SoC, based
|
||||
* on the SEC_VID register.
|
||||
* Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
|
||||
* @caam_id - the value of the SEC_VID register
|
||||
* on "sec-era" propery in the DTS. This property is updated by u-boot.
|
||||
**/
|
||||
int caam_get_era(u64 caam_id)
|
||||
int caam_get_era(void)
|
||||
{
|
||||
struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
|
||||
static const struct {
|
||||
u16 ip_id;
|
||||
u8 maj_rev;
|
||||
u8 era;
|
||||
} caam_eras[] = {
|
||||
{0x0A10, 1, 1},
|
||||
{0x0A10, 2, 2},
|
||||
{0x0A12, 1, 3},
|
||||
{0x0A14, 1, 3},
|
||||
{0x0A14, 2, 4},
|
||||
{0x0A16, 1, 4},
|
||||
{0x0A11, 1, 4}
|
||||
};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
|
||||
if (caam_eras[i].ip_id == sec_vid->ip_id &&
|
||||
caam_eras[i].maj_rev == sec_vid->maj_rev)
|
||||
return caam_eras[i].era;
|
||||
struct device_node *caam_node;
|
||||
for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
|
||||
const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
|
||||
"fsl,sec-era",
|
||||
NULL);
|
||||
return prop ? *prop : -ENOTSUPP;
|
||||
}
|
||||
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
@@ -443,13 +427,10 @@ static int caam_probe(struct platform_device *pdev)
|
||||
* for all, then go probe each one.
|
||||
*/
|
||||
rspec = 0;
|
||||
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
|
||||
rspec++;
|
||||
if (!rspec) {
|
||||
/* for backward compatible with device trees */
|
||||
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
|
||||
for_each_available_child_of_node(nprop, np)
|
||||
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
|
||||
rspec++;
|
||||
}
|
||||
|
||||
ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
|
||||
GFP_KERNEL);
|
||||
@@ -460,18 +441,9 @@ static int caam_probe(struct platform_device *pdev)
|
||||
|
||||
ring = 0;
|
||||
ctrlpriv->total_jobrs = 0;
|
||||
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
|
||||
ctrlpriv->jrpdev[ring] =
|
||||
of_platform_device_create(np, NULL, dev);
|
||||
if (!ctrlpriv->jrpdev[ring]) {
|
||||
pr_warn("JR%d Platform device creation error\n", ring);
|
||||
continue;
|
||||
}
|
||||
ctrlpriv->total_jobrs++;
|
||||
ring++;
|
||||
}
|
||||
if (!ring) {
|
||||
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
|
||||
for_each_available_child_of_node(nprop, np)
|
||||
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
|
||||
ctrlpriv->jrpdev[ring] =
|
||||
of_platform_device_create(np, NULL, dev);
|
||||
if (!ctrlpriv->jrpdev[ring]) {
|
||||
@@ -482,7 +454,6 @@ static int caam_probe(struct platform_device *pdev)
|
||||
ctrlpriv->total_jobrs++;
|
||||
ring++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check to see if QI present. If so, enable */
|
||||
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
|
||||
@@ -564,7 +535,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
|
||||
/* Report "alive" for developer to see */
|
||||
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
|
||||
caam_get_era(caam_id));
|
||||
caam_get_era());
|
||||
dev_info(dev, "job rings = %d, qi = %d\n",
|
||||
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
|
||||
|
||||
|
@@ -8,6 +8,6 @@
#define CTRL_H

/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(u64 caam_id);
int caam_get_era(void);

#endif /* CTRL_H */

@@ -155,21 +155,29 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
append_data(desc, data, len);
}

static inline u32 *append_jump(u32 *desc, u32 options)
{
u32 *cmd = desc_end(desc);

PRINT_POS;
append_cmd(desc, CMD_JUMP | options);

return cmd;
#define APPEND_CMD_RET(cmd, op) \
static inline u32 *append_##cmd(u32 *desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
return cmd; \
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)

static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
}

static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
{
*move_cmd &= ~MOVE_OFFSET_MASK;
*move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
MOVE_OFFSET_MASK);
}

#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
@@ -177,7 +185,6 @@ static inline void append_##cmd(u32 *desc, u32 options) \
append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
APPEND_CMD(move, MOVE)

#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
@@ -328,7 +335,7 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
do { \
APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
append_cmd(desc, data); \
} while (0);
} while (0)

#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)

@@ -74,10 +74,10 @@
#endif
#else
#ifdef __LITTLE_ENDIAN
#define wr_reg32(reg, data) __raw_writel(reg, data)
#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) __raw_writeq(reg, data)
#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
#endif
#endif

@@ -11,6 +11,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/ccp.h>
|
||||
@@ -24,28 +25,33 @@ MODULE_LICENSE("GPL");
|
||||
MODULE_VERSION("1.0.0");
|
||||
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
|
||||
|
||||
static unsigned int aes_disable;
|
||||
module_param(aes_disable, uint, 0444);
|
||||
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");
|
||||
|
||||
static unsigned int sha_disable;
|
||||
module_param(sha_disable, uint, 0444);
|
||||
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
|
||||
|
||||
|
||||
/* List heads for the supported algorithms */
|
||||
static LIST_HEAD(hash_algs);
|
||||
static LIST_HEAD(cipher_algs);
|
||||
|
||||
/* For any tfm, requests for that tfm on the same CPU must be returned
|
||||
* in the order received. With multiple queues available, the CCP can
|
||||
* process more than one cmd at a time. Therefore we must maintain
|
||||
* a cmd list to insure the proper ordering of requests on a given tfm/cpu
|
||||
* combination.
|
||||
/* For any tfm, requests for that tfm must be returned on the order
|
||||
* received. With multiple queues available, the CCP can process more
|
||||
* than one cmd at a time. Therefore we must maintain a cmd list to insure
|
||||
* the proper ordering of requests on a given tfm.
|
||||
*/
|
||||
struct ccp_crypto_cpu_queue {
|
||||
struct ccp_crypto_queue {
|
||||
struct list_head cmds;
|
||||
struct list_head *backlog;
|
||||
unsigned int cmd_count;
|
||||
};
|
||||
#define CCP_CRYPTO_MAX_QLEN 50
|
||||
#define CCP_CRYPTO_MAX_QLEN 100
|
||||
|
||||
struct ccp_crypto_percpu_queue {
|
||||
struct ccp_crypto_cpu_queue __percpu *cpu_queue;
|
||||
};
|
||||
static struct ccp_crypto_percpu_queue req_queue;
|
||||
static struct ccp_crypto_queue req_queue;
|
||||
static spinlock_t req_queue_lock;
|
||||
|
||||
struct ccp_crypto_cmd {
|
||||
struct list_head entry;
|
||||
@@ -62,8 +68,6 @@ struct ccp_crypto_cmd {
|
||||
|
||||
/* Used for held command processing to determine state */
|
||||
int ret;
|
||||
|
||||
int cpu;
|
||||
};
|
||||
|
||||
struct ccp_crypto_cpu {
|
||||
@@ -82,25 +86,21 @@ static inline bool ccp_crypto_success(int err)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* ccp_crypto_cmd_complete must be called while running on the appropriate
|
||||
* cpu and the caller must have done a get_cpu to disable preemption
|
||||
*/
|
||||
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
|
||||
struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
|
||||
{
|
||||
struct ccp_crypto_cpu_queue *cpu_queue;
|
||||
struct ccp_crypto_cmd *held = NULL, *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
*backlog = NULL;
|
||||
|
||||
cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
|
||||
spin_lock_irqsave(&req_queue_lock, flags);
|
||||
|
||||
/* Held cmds will be after the current cmd in the queue so start
|
||||
* searching for a cmd with a matching tfm for submission.
|
||||
*/
|
||||
tmp = crypto_cmd;
|
||||
list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
|
||||
list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
|
||||
if (crypto_cmd->tfm != tmp->tfm)
|
||||
continue;
|
||||
held = tmp;
|
||||
@@ -111,47 +111,45 @@ static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
|
||||
* Because cmds can be executed from any point in the cmd list
|
||||
* special precautions have to be taken when handling the backlog.
|
||||
*/
|
||||
if (cpu_queue->backlog != &cpu_queue->cmds) {
|
||||
if (req_queue.backlog != &req_queue.cmds) {
|
||||
/* Skip over this cmd if it is the next backlog cmd */
|
||||
if (cpu_queue->backlog == &crypto_cmd->entry)
|
||||
cpu_queue->backlog = crypto_cmd->entry.next;
|
||||
if (req_queue.backlog == &crypto_cmd->entry)
|
||||
req_queue.backlog = crypto_cmd->entry.next;
|
||||
|
||||
*backlog = container_of(cpu_queue->backlog,
|
||||
*backlog = container_of(req_queue.backlog,
|
||||
struct ccp_crypto_cmd, entry);
|
||||
cpu_queue->backlog = cpu_queue->backlog->next;
|
||||
req_queue.backlog = req_queue.backlog->next;
|
||||
|
||||
/* Skip over this cmd if it is now the next backlog cmd */
|
||||
if (cpu_queue->backlog == &crypto_cmd->entry)
|
||||
cpu_queue->backlog = crypto_cmd->entry.next;
|
||||
if (req_queue.backlog == &crypto_cmd->entry)
|
||||
req_queue.backlog = crypto_cmd->entry.next;
|
||||
}
|
||||
|
||||
/* Remove the cmd entry from the list of cmds */
|
||||
cpu_queue->cmd_count--;
|
||||
req_queue.cmd_count--;
|
||||
list_del(&crypto_cmd->entry);
|
||||
|
||||
spin_unlock_irqrestore(&req_queue_lock, flags);
|
||||
|
||||
return held;
|
||||
}
|
||||
|
||||
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
|
||||
static void ccp_crypto_complete(void *data, int err)
|
||||
{
|
||||
struct ccp_crypto_cpu *cpu_work =
|
||||
container_of(work, struct ccp_crypto_cpu, work);
|
||||
struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
|
||||
struct ccp_crypto_cmd *crypto_cmd = data;
|
||||
struct ccp_crypto_cmd *held, *next, *backlog;
|
||||
struct crypto_async_request *req = crypto_cmd->req;
|
||||
struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
|
||||
int cpu, ret;
|
||||
int ret;
|
||||
|
||||
cpu = get_cpu();
|
||||
|
||||
if (cpu_work->err == -EINPROGRESS) {
|
||||
if (err == -EINPROGRESS) {
|
||||
/* Only propogate the -EINPROGRESS if necessary */
|
||||
if (crypto_cmd->ret == -EBUSY) {
|
||||
crypto_cmd->ret = -EINPROGRESS;
|
||||
req->complete(req, -EINPROGRESS);
|
||||
}
|
||||
|
||||
goto e_cpu;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Operation has completed - update the queue before invoking
|
||||
@@ -169,18 +167,25 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
|
||||
req->complete(req, -EINPROGRESS);
|
||||
|
||||
/* Completion callbacks */
|
||||
ret = cpu_work->err;
|
||||
ret = err;
|
||||
if (ctx->complete)
|
||||
ret = ctx->complete(req, ret);
|
||||
req->complete(req, ret);
|
||||
|
||||
/* Submit the next cmd */
|
||||
while (held) {
|
||||
/* Since we have already queued the cmd, we must indicate that
|
||||
* we can backlog so as not to "lose" this request.
|
||||
*/
|
||||
held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
|
||||
ret = ccp_enqueue_cmd(held->cmd);
|
||||
if (ccp_crypto_success(ret))
|
||||
break;
|
||||
|
||||
/* Error occurred, report it and get the next entry */
|
||||
ctx = crypto_tfm_ctx(held->req->tfm);
|
||||
if (ctx->complete)
|
||||
ret = ctx->complete(held->req, ret);
|
||||
held->req->complete(held->req, ret);
|
||||
|
||||
next = ccp_crypto_cmd_complete(held, &backlog);
|
||||
@@ -194,52 +199,29 @@ static void ccp_crypto_complete_on_cpu(struct work_struct *work)
|
||||
}
|
||||
|
||||
kfree(crypto_cmd);
|
||||
|
||||
e_cpu:
|
||||
put_cpu();
|
||||
|
||||
complete(&cpu_work->completion);
|
||||
}
|
||||
|
||||
static void ccp_crypto_complete(void *data, int err)
|
||||
{
|
||||
struct ccp_crypto_cmd *crypto_cmd = data;
|
||||
struct ccp_crypto_cpu cpu_work;
|
||||
|
||||
INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
|
||||
init_completion(&cpu_work.completion);
|
||||
cpu_work.crypto_cmd = crypto_cmd;
|
||||
cpu_work.err = err;
|
||||
|
||||
schedule_work_on(crypto_cmd->cpu, &cpu_work.work);
|
||||
|
||||
/* Keep the completion call synchronous */
|
||||
wait_for_completion(&cpu_work.completion);
|
||||
}
|
||||
|
||||
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
|
||||
{
|
||||
struct ccp_crypto_cpu_queue *cpu_queue;
|
||||
struct ccp_crypto_cmd *active = NULL, *tmp;
|
||||
int cpu, ret;
|
||||
unsigned long flags;
|
||||
bool free_cmd = true;
|
||||
int ret;
|
||||
|
||||
cpu = get_cpu();
|
||||
crypto_cmd->cpu = cpu;
|
||||
|
||||
cpu_queue = this_cpu_ptr(req_queue.cpu_queue);
|
||||
spin_lock_irqsave(&req_queue_lock, flags);
|
||||
|
||||
/* Check if the cmd can/should be queued */
|
||||
if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
|
||||
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
|
||||
ret = -EBUSY;
|
||||
if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
|
||||
goto e_cpu;
|
||||
goto e_lock;
|
||||
}
|
||||
|
||||
/* Look for an entry with the same tfm. If there is a cmd
|
||||
* with the same tfm in the list for this cpu then the current
|
||||
* cmd cannot be submitted to the CCP yet.
|
||||
* with the same tfm in the list then the current cmd cannot
|
||||
* be submitted to the CCP yet.
|
||||
*/
|
||||
list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
|
||||
list_for_each_entry(tmp, &req_queue.cmds, entry) {
|
||||
if (crypto_cmd->tfm != tmp->tfm)
|
||||
continue;
|
||||
active = tmp;
|
||||
@@ -250,21 +232,29 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
|
||||
if (!active) {
|
||||
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
|
||||
if (!ccp_crypto_success(ret))
|
||||
goto e_cpu;
|
||||
goto e_lock; /* Error, don't queue it */
|
||||
if ((ret == -EBUSY) &&
|
||||
!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
|
||||
goto e_lock; /* Not backlogging, don't queue it */
|
||||
}
|
||||
|
||||
if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
|
||||
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
|
||||
ret = -EBUSY;
|
||||
if (cpu_queue->backlog == &cpu_queue->cmds)
|
||||
cpu_queue->backlog = &crypto_cmd->entry;
|
||||
if (req_queue.backlog == &req_queue.cmds)
|
||||
req_queue.backlog = &crypto_cmd->entry;
|
||||
}
|
||||
crypto_cmd->ret = ret;
|
||||
|
||||
cpu_queue->cmd_count++;
|
||||
list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);
|
||||
req_queue.cmd_count++;
|
||||
list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
|
||||
|
||||
e_cpu:
|
||||
put_cpu();
|
||||
free_cmd = false;
|
||||
|
||||
e_lock:
|
||||
spin_unlock_irqrestore(&req_queue_lock, flags);
|
||||
|
||||
if (free_cmd)
|
||||
kfree(crypto_cmd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -281,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
|
||||
{
|
||||
struct ccp_crypto_cmd *crypto_cmd;
|
||||
gfp_t gfp;
|
||||
int ret;
|
||||
|
||||
gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
|
||||
|
||||
@@ -306,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
|
||||
else
|
||||
cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
|
||||
|
||||
ret = ccp_crypto_enqueue_cmd(crypto_cmd);
|
||||
if (!ccp_crypto_success(ret))
|
||||
kfree(crypto_cmd);
|
||||
|
||||
return ret;
|
||||
return ccp_crypto_enqueue_cmd(crypto_cmd);
|
||||
}
|
||||
|
||||
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
|
||||
@@ -337,21 +322,25 @@ static int ccp_register_algs(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ccp_register_aes_algs(&cipher_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!aes_disable) {
|
||||
ret = ccp_register_aes_algs(&cipher_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ccp_register_aes_cmac_algs(&hash_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = ccp_register_aes_cmac_algs(&hash_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ccp_register_aes_xts_algs(&cipher_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = ccp_register_aes_xts_algs(&cipher_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ccp_register_sha_algs(&hash_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!sha_disable) {
|
||||
ret = ccp_register_sha_algs(&hash_algs);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -374,50 +363,18 @@ static void ccp_unregister_algs(void)
|
||||
}
|
||||
}
|
||||
|
||||
static int ccp_init_queues(void)
|
||||
{
|
||||
struct ccp_crypto_cpu_queue *cpu_queue;
|
||||
int cpu;
|
||||
|
||||
req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
|
||||
if (!req_queue.cpu_queue)
|
||||
return -ENOMEM;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
|
||||
INIT_LIST_HEAD(&cpu_queue->cmds);
|
||||
cpu_queue->backlog = &cpu_queue->cmds;
|
||||
cpu_queue->cmd_count = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ccp_fini_queue(void)
|
||||
{
|
||||
struct ccp_crypto_cpu_queue *cpu_queue;
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
|
||||
BUG_ON(!list_empty(&cpu_queue->cmds));
|
||||
}
|
||||
free_percpu(req_queue.cpu_queue);
|
||||
}
|
||||
|
||||
static int ccp_crypto_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ccp_init_queues();
|
||||
if (ret)
|
||||
return ret;
|
||||
spin_lock_init(&req_queue_lock);
|
||||
INIT_LIST_HEAD(&req_queue.cmds);
|
||||
req_queue.backlog = &req_queue.cmds;
|
||||
req_queue.cmd_count = 0;
|
||||
|
||||
ret = ccp_register_algs();
|
||||
if (ret) {
|
||||
if (ret)
|
||||
ccp_unregister_algs();
|
||||
ccp_fini_queue();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -425,7 +382,6 @@ static int ccp_crypto_init(void)
|
||||
static void ccp_crypto_exit(void)
|
||||
{
|
||||
ccp_unregister_algs();
|
||||
ccp_fini_queue();
|
||||
}
|
||||
|
||||
module_init(ccp_crypto_init);
|
||||
|
@@ -24,75 +24,10 @@
|
||||
#include "ccp-crypto.h"
|
||||
|
||||
|
||||
struct ccp_sha_result {
|
||||
struct completion completion;
|
||||
int err;
|
||||
};
|
||||
|
||||
static void ccp_sync_hash_complete(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct ccp_sha_result *result = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
result->err = err;
|
||||
complete(&result->completion);
|
||||
}
|
||||
|
||||
static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf,
|
||||
struct scatterlist *sg, unsigned int len)
|
||||
{
|
||||
struct ccp_sha_result result;
|
||||
struct ahash_request *req;
|
||||
int ret;
|
||||
|
||||
init_completion(&result.completion);
|
||||
|
||||
req = ahash_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req)
|
||||
return -ENOMEM;
|
||||
|
||||
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
ccp_sync_hash_complete, &result);
|
||||
ahash_request_set_crypt(req, sg, buf, len);
|
||||
|
||||
ret = crypto_ahash_digest(req);
|
||||
if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
|
||||
ret = wait_for_completion_interruptible(&result.completion);
|
||||
if (!ret)
|
||||
ret = result.err;
|
||||
}
|
||||
|
||||
ahash_request_free(req);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ccp_sha_finish_hmac(struct crypto_async_request *async_req)
|
||||
{
|
||||
struct ahash_request *req = ahash_request_cast(async_req);
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
|
||||
struct scatterlist sg[2];
|
||||
unsigned int block_size =
|
||||
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
|
||||
unsigned int digest_size = crypto_ahash_digestsize(tfm);
|
||||
|
||||
sg_init_table(sg, ARRAY_SIZE(sg));
|
||||
sg_set_buf(&sg[0], ctx->u.sha.opad, block_size);
|
||||
sg_set_buf(&sg[1], rctx->ctx, digest_size);
|
||||
|
||||
return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg,
|
||||
block_size + digest_size);
|
||||
}
|
||||
|
||||
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
|
||||
{
|
||||
struct ahash_request *req = ahash_request_cast(async_req);
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
|
||||
unsigned int digest_size = crypto_ahash_digestsize(tfm);
|
||||
|
||||
@@ -112,10 +47,6 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
|
||||
if (req->result)
|
||||
memcpy(req->result, rctx->ctx, digest_size);
|
||||
|
||||
/* If we're doing an HMAC, we need to perform that on the final op */
|
||||
if (rctx->final && ctx->u.sha.key_len)
|
||||
ret = ccp_sha_finish_hmac(async_req);
|
||||
|
||||
e_free:
|
||||
sg_free_table(&rctx->data_sg);
|
||||
|
||||
@@ -126,6 +57,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
|
||||
unsigned int final)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
|
||||
struct scatterlist *sg;
|
||||
unsigned int block_size =
|
||||
@@ -196,6 +128,11 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
|
||||
rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
|
||||
rctx->cmd.u.sha.src = sg;
|
||||
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
|
||||
rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
|
||||
&ctx->u.sha.opad_sg : NULL;
|
||||
rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
|
||||
ctx->u.sha.opad_count : 0;
|
||||
rctx->cmd.u.sha.first = rctx->first;
|
||||
rctx->cmd.u.sha.final = rctx->final;
|
||||
rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
|
||||
@@ -218,7 +155,6 @@ static int ccp_sha_init(struct ahash_request *req)
memset(rctx, 0, sizeof(*rctx));
memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx));
rctx->type = alg->type;
rctx->first = 1;

@@ -261,10 +197,13 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct scatterlist sg;
unsigned int block_size =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digest_size = crypto_ahash_digestsize(tfm);
struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
struct {
struct shash_desc sdesc;
char ctx[crypto_shash_descsize(shash)];
} desc;
unsigned int block_size = crypto_shash_blocksize(shash);
unsigned int digest_size = crypto_shash_digestsize(shash);
int i, ret;

/* Set to zero until complete */

@@ -277,8 +216,12 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sg_init_one(&sg, key, key_len);
ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len);
desc.sdesc.tfm = shash;
desc.sdesc.flags = crypto_ahash_get_flags(tfm) &
CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(&desc.sdesc, key, key_len,
ctx->u.sha.key);
if (ret) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;

@@ -293,6 +236,9 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
}

sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
ctx->u.sha.opad_count = block_size;

ctx->u.sha.key_len = key_len;

return 0;

@@ -319,10 +265,9 @@ static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
struct crypto_ahash *hmac_tfm;
struct crypto_shash *hmac_tfm;

hmac_tfm = crypto_alloc_ahash(alg->child_alg,
CRYPTO_ALG_TYPE_AHASH, 0);
hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
if (IS_ERR(hmac_tfm)) {
pr_warn("could not load driver %s need for HMAC support\n",
alg->child_alg);

@@ -339,35 +284,14 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

if (ctx->u.sha.hmac_tfm)
crypto_free_ahash(ctx->u.sha.hmac_tfm);
crypto_free_shash(ctx->u.sha.hmac_tfm);

ccp_sha_cra_exit(tfm);
}

static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

struct ccp_sha_def {
const char *name;
const char *drv_name;
const __be32 *init;
enum ccp_sha_type type;
u32 digest_size;
u32 block_size;

@@ -377,7 +301,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha1",
.drv_name = "sha1-ccp",
.init = sha1_init,
.type = CCP_SHA_TYPE_1,
.digest_size = SHA1_DIGEST_SIZE,
.block_size = SHA1_BLOCK_SIZE,

@@ -385,7 +308,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha224",
.drv_name = "sha224-ccp",
.init = sha224_init,
.type = CCP_SHA_TYPE_224,
.digest_size = SHA224_DIGEST_SIZE,
.block_size = SHA224_BLOCK_SIZE,

@@ -393,7 +315,6 @@ static struct ccp_sha_def sha_algs[] = {
{
.name = "sha256",
.drv_name = "sha256-ccp",
.init = sha256_init,
.type = CCP_SHA_TYPE_256,
.digest_size = SHA256_DIGEST_SIZE,
.block_size = SHA256_BLOCK_SIZE,

@@ -460,7 +381,6 @@ static int ccp_register_sha_alg(struct list_head *head,
INIT_LIST_HEAD(&ccp_alg->entry);

ccp_alg->init = def->init;
ccp_alg->type = def->type;

alg = &ccp_alg->alg;
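The ccp_sha_setkey() changes above reduce a long key with crypto_shash_digest() and then derive the HMAC inner/outer pads by XORing the block-sized key with 0x36 and 0x5c, per RFC 2104. As a reminder of that derivation, here is a minimal standalone sketch (illustrative only, not the driver's code; all names here are invented):

#include <stddef.h>
#include <stdint.h>

/* hmac_derive_pads: K' is the key zero-padded to the hash block size;
 * ipad[i] = K'[i] ^ 0x36, opad[i] = K'[i] ^ 0x5c (RFC 2104).
 */
static void hmac_derive_pads(const uint8_t *key, size_t key_len,
			     size_t block_size, uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	for (i = 0; i < block_size; i++) {
		uint8_t k = (i < key_len) ? key[i] : 0x00;

		ipad[i] = k ^ 0x36;
		opad[i] = k ^ 0x5c;
	}
}

HMAC(K, m) is then H(opad || H(ipad || m)), which is why stashing opad (the opad_sg/opad_count fields above) is all the driver needs in order to let the hardware finish the outer hash later.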
@@ -137,11 +137,14 @@ struct ccp_aes_cmac_req_ctx {
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

struct ccp_sha_ctx {
struct scatterlist opad_sg;
unsigned int opad_count;

unsigned int key_len;
u8 key[MAX_SHA_BLOCK_SIZE];
u8 ipad[MAX_SHA_BLOCK_SIZE];
u8 opad[MAX_SHA_BLOCK_SIZE];
struct crypto_ahash *hmac_tfm;
struct crypto_shash *hmac_tfm;
};

struct ccp_sha_req_ctx {

@@ -167,9 +170,6 @@ struct ccp_sha_req_ctx {
unsigned int buf_count;
u8 buf[MAX_SHA_BLOCK_SIZE];

/* HMAC support field */
struct scatterlist pad_sg;

/* CCP driver command */
struct ccp_cmd cmd;
};
@@ -30,6 +30,11 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
struct completion completion;
struct ccp_cmd *cmd;
};

static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)

@@ -192,17 +197,23 @@ static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
return cmd;
}

static void ccp_do_cmd_complete(struct work_struct *work)
static void ccp_do_cmd_complete(unsigned long data)
{
struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
struct ccp_cmd *cmd = tdata->cmd;

cmd->callback(cmd->data, cmd->ret);
complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
struct ccp_tasklet_data tdata;
struct tasklet_struct tasklet;

tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {

@@ -220,8 +231,10 @@ static int ccp_cmd_queue_thread(void *data)
cmd->ret = ccp_run_cmd(cmd_q, cmd);

/* Schedule the completion callback */
INIT_WORK(&cmd->work, ccp_do_cmd_complete);
schedule_work(&cmd->work);
tdata.cmd = cmd;
init_completion(&tdata.completion);
tasklet_schedule(&tasklet);
wait_for_completion(&tdata.completion);
}

__set_current_state(TASK_RUNNING);
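The ccp-dev.c hunk above replaces the per-command workqueue callback with a tasklet plus a completion: the queue thread schedules the tasklet, the callback runs in softirq context, and the thread waits for it to finish before dequeuing the next command, which addresses the late work queue initialization noted in the merge summary and keeps completions ordered. A stripped-down sketch of that pattern (illustrative kernel-style code; the names are invented):

#include <linux/completion.h>
#include <linux/interrupt.h>

struct demo_tasklet_data {
	struct completion completion;
	int result;
};

static void demo_tasklet_fn(unsigned long data)
{
	struct demo_tasklet_data *tdata = (struct demo_tasklet_data *)data;

	/* ... invoke the caller's callback with tdata->result here ... */
	complete(&tdata->completion);
}

static void demo_run_and_wait(int result)
{
	struct demo_tasklet_data tdata = { .result = result };
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, demo_tasklet_fn, (unsigned long)&tdata);
	init_completion(&tdata.completion);
	tasklet_schedule(&tasklet);
	wait_for_completion(&tdata.completion);	/* only from sleepable context */
	tasklet_kill(&tasklet);
}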
@@ -23,6 +23,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "ccp-dev.h"

@@ -132,6 +133,27 @@ struct ccp_op {
} u;
};

/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

/* The CCP cannot perform zero-length sha operations so the caller
* is required to buffer data for the final operation. However, a
* sha operation for a message with a total length of zero is valid

@@ -1411,7 +1433,27 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (ret)
return ret;

ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
if (sha->first) {
const __be32 *init;

switch (sha->type) {
case CCP_SHA_TYPE_1:
init = ccp_sha1_init;
break;
case CCP_SHA_TYPE_224:
init = ccp_sha224_init;
break;
case CCP_SHA_TYPE_256:
init = ccp_sha256_init;
break;
default:
ret = -EINVAL;
goto e_ctx;
}
memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
} else
ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {

@@ -1451,6 +1493,66 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

if (sha->final && sha->opad) {
/* HMAC operation, recursively perform final SHA */
struct ccp_cmd hmac_cmd;
struct scatterlist sg;
u64 block_size, digest_size;
u8 *hmac_buf;

switch (sha->type) {
case CCP_SHA_TYPE_1:
block_size = SHA1_BLOCK_SIZE;
digest_size = SHA1_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_224:
block_size = SHA224_BLOCK_SIZE;
digest_size = SHA224_DIGEST_SIZE;
break;
case CCP_SHA_TYPE_256:
block_size = SHA256_BLOCK_SIZE;
digest_size = SHA256_DIGEST_SIZE;
break;
default:
ret = -EINVAL;
goto e_data;
}

if (sha->opad_len != block_size) {
ret = -EINVAL;
goto e_data;
}

hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
if (!hmac_buf) {
ret = -ENOMEM;
goto e_data;
}
sg_init_one(&sg, hmac_buf, block_size + digest_size);

scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
memcpy(hmac_buf + block_size, ctx.address, digest_size);

memset(&hmac_cmd, 0, sizeof(hmac_cmd));
hmac_cmd.engine = CCP_ENGINE_SHA;
hmac_cmd.u.sha.type = sha->type;
hmac_cmd.u.sha.ctx = sha->ctx;
hmac_cmd.u.sha.ctx_len = sha->ctx_len;
hmac_cmd.u.sha.src = &sg;
hmac_cmd.u.sha.src_len = block_size + digest_size;
hmac_cmd.u.sha.opad = NULL;
hmac_cmd.u.sha.opad_len = 0;
hmac_cmd.u.sha.first = 1;
hmac_cmd.u.sha.final = 1;
hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
if (ret)
cmd->engine_error = hmac_cmd.engine_error;

kfree(hmac_buf);
}

e_data:
ccp_free_data(&src, cmd_q);

@@ -1666,8 +1768,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
op.src.u.dma.offset = dst.sg_wa.sg_used;
op.src.u.dma.length = op.src.u.dma.length;
op.dst.u.dma.offset = dst.sg_wa.sg_used;
op.dst.u.dma.length = op.src.u.dma.length;

ret = ccp_perform_passthru(&op);
if (ret) {
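The new branch at the end of ccp_run_sha_cmd() is where HMAC is now finished: when final and opad are both set, the saved opad block and the freshly computed inner digest are concatenated into hmac_buf and hashed once more as a complete (first and final) message of block_size + digest_size bytes. In isolation, the buffer handed back to the engine is just this (sketch, not driver code; the helper name is invented):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Build the message for the HMAC outer hash:
 * outer_msg = opad (one hash block) || inner_digest.
 * Hashing these block_size + digest_size bytes yields HMAC(K, m).
 */
static uint8_t *hmac_build_outer_msg(const uint8_t *opad, size_t block_size,
				     const uint8_t *inner_digest,
				     size_t digest_size)
{
	uint8_t *buf = malloc(block_size + digest_size);

	if (!buf)
		return NULL;
	memcpy(buf, opad, block_size);
	memcpy(buf + block_size, inner_digest, digest_size);
	return buf;
}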
@@ -29,6 +29,8 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE

#define DCP_ALIGNMENT 64

/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;

@@ -48,7 +50,6 @@ struct dcp_coherent_block {
uint8_t sha_in_buf[DCP_BUF_SZ];

uint8_t aes_key[2 * AES_KEYSIZE_128];
uint8_t sha_digest[SHA256_DIGEST_SIZE];

struct dcp_dma_desc desc[DCP_MAX_CHANS];
};

@@ -83,13 +84,16 @@ struct dcp_async_ctx {
unsigned int hot:1;

/* Crypto-specific context */
unsigned int enc:1;
unsigned int ecb:1;
struct crypto_ablkcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
unsigned int enc:1;
unsigned int ecb:1;
};

struct dcp_sha_req_ctx {
unsigned int init:1;
unsigned int fini:1;

@@ -190,10 +194,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
/*
* Encryption (AES128)
*/
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct ablkcipher_request *req, int init)
{
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
int ret;

dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,

@@ -212,14 +218,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

if (actx->enc)
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
if (init)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

if (actx->ecb)
if (rctx->ecb)
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
else
desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

@@ -247,6 +253,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
struct ablkcipher_request *req = ablkcipher_request_cast(arq);
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;

@@ -271,7 +278,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
/* Copy the key from the temporary location. */
memcpy(key, actx->key, actx->key_len);

if (!actx->ecb) {
if (!rctx->ecb) {
/* Copy the CBC IV just past the key. */
memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
/* CBC needs the INIT set. */

@@ -300,7 +307,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* submit the buffer.
*/
if (actx->fill == out_off || sg_is_last(src)) {
ret = mxs_dcp_run_aes(actx, init);
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;

@@ -391,13 +398,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
struct dcp *sdcp = global_sdcp;
struct crypto_async_request *arq = &req->base;
struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
int ret;

if (unlikely(actx->key_len != AES_KEYSIZE_128))
return mxs_dcp_block_fallback(req, enc);

actx->enc = enc;
actx->ecb = ecb;
rctx->enc = enc;
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;

mutex_lock(&sdcp->mutex[actx->chan]);

@@ -484,7 +492,7 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
return PTR_ERR(blk);

actx->fallback = blk;
tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx);
tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
return 0;
}

@@ -507,13 +515,11 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = dma_map_single(sdcp->dev,
sdcp->coh->sha_digest,
SHA256_DIGEST_SIZE,
DMA_FROM_DEVICE);

dma_addr_t digest_phys = 0;
dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);

@@ -534,14 +540,18 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, req->result,
halg->digestsize, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}

ret = mxs_dcp_start_dma(actx);

dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
DMA_FROM_DEVICE);
if (rctx->fini)
dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
DMA_FROM_DEVICE);

dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

return ret;

@@ -558,7 +568,6 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const int nents = sg_nents(req->src);

uint8_t *digest = sdcp->coh->sha_digest;
uint8_t *in_buf = sdcp->coh->sha_in_buf;

uint8_t *src_buf;

@@ -605,14 +614,20 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
rctx->fini = 1;

/* Submit whatever is left. */
if (!req->result)
return -EINVAL;

ret = mxs_dcp_run_sha(req);
if (ret || !req->result)
if (ret)
return ret;

actx->fill = 0;

/* For some reason, the result is flipped. */
for (i = 0; i < halg->digestsize; i++)
req->result[i] = digest[halg->digestsize - i - 1];
for (i = 0; i < halg->digestsize / 2; i++) {
swap(req->result[i],
req->result[halg->digestsize - i - 1]);
}
}

return 0;

@@ -901,9 +916,14 @@ static int mxs_dcp_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
if (dcp_vmi_irq < 0) {
ret = dcp_vmi_irq;
goto err_mutex;
}

dcp_irq = platform_get_irq(pdev, 1);
if (dcp_vmi_irq < 0 || dcp_irq < 0) {
ret = -EINVAL;
if (dcp_irq < 0) {
ret = dcp_irq;
goto err_mutex;
}

@@ -935,15 +955,20 @@ static int mxs_dcp_probe(struct platform_device *pdev)
}

/* Allocate coherent helper block. */
sdcp->coh = kzalloc(sizeof(struct dcp_coherent_block), GFP_KERNEL);
sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
GFP_KERNEL);
if (!sdcp->coh) {
dev_err(dev, "Error allocating coherent block\n");
ret = -ENOMEM;
goto err_mutex;
}

/* Re-align the structure so it fits the DCP constraints. */
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

/* Restart the DCP block. */
stmp_reset_block(sdcp->base);
ret = stmp_reset_block(sdcp->base);
if (ret)
goto err_mutex;

/* Initialize control register. */
writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |

@@ -982,7 +1007,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
goto err_free_coherent;
goto err_mutex;
}

sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,

@@ -1040,8 +1065,6 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_free_coherent:
kfree(sdcp->coh);
err_mutex:
mutex_unlock(&global_mutex);
return ret;

@@ -1051,8 +1074,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
{
struct dcp *sdcp = platform_get_drvdata(pdev);

kfree(sdcp->coh);

if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
crypto_unregister_ahash(&dcp_sha256_alg);
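Two things are worth calling out in the mxs-dcp changes above: the per-request enc/ecb flags move out of the shared dcp_async_ctx into a dcp_aes_req_ctx, so concurrent requests on one transform no longer stomp on each other, and the coherent helper block becomes a managed allocation padded by DCP_ALIGNMENT and rounded up with PTR_ALIGN(), which is why the kfree() calls and the err_free_coherent label disappear. The alignment idiom on its own looks roughly like this (sketch with made-up names):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define DEMO_ALIGNMENT 64

/* Over-allocate by the alignment and round the pointer up: the result is
 * DEMO_ALIGNMENT-aligned, still lies inside the devm allocation, and is
 * freed automatically when the device is unbound.
 */
static void *demo_alloc_aligned(struct device *dev, size_t len)
{
	void *buf = devm_kzalloc(dev, len + DEMO_ALIGNMENT, GFP_KERNEL);

	return buf ? PTR_ALIGN(buf, DEMO_ALIGNMENT) : NULL;
}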
@@ -1307,9 +1307,7 @@ static int omap_aes_resume(struct device *dev)
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};
static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
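The omap-aes PM hunk (and the identical omap-sham one further down) is pure boilerplate reduction: SIMPLE_DEV_PM_OPS() declares the dev_pm_ops structure itself, so the open-coded definition can go. Its expansion is approximately the following (paraphrased from <linux/pm.h>, not a verbatim copy):

/* Roughly what SIMPLE_DEV_PM_OPS does: */
#define DEMO_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
	const struct dev_pm_ops name = { \
		SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	}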
drivers/crypto/omap-des.c (new file, 1216 lines)
File diff suppressed because it is too large
@@ -636,11 +636,17 @@ static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
size_t count;
const u8 *vaddr;

while (ctx->sg) {
vaddr = kmap_atomic(sg_page(ctx->sg));

count = omap_sham_append_buffer(ctx,
sg_virt(ctx->sg) + ctx->offset,
vaddr + ctx->offset,
ctx->sg->length - ctx->offset);

kunmap_atomic((void *)vaddr);

if (!count)
break;
ctx->offset += count;

@@ -2022,9 +2028,7 @@ static int omap_sham_resume(struct device *dev)
}
#endif

static const struct dev_pm_ops omap_sham_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};
static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);

static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
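The omap-sham append hunk above is the highmem fix mentioned in the merge summary: sg_virt() is only valid for pages with a permanent kernel mapping, so the scatterlist page is now mapped with kmap_atomic() before its contents are copied into the driver buffer. The general pattern for CPU access to possibly-highmem SG data is roughly the following (sketch; assumes the entry does not cross a page, and the helper name is invented):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to buf_len bytes out of one scatterlist entry, starting at offset. */
static size_t demo_copy_from_sg_entry(struct scatterlist *sg, unsigned int offset,
				      u8 *buf, size_t buf_len)
{
	size_t count = min_t(size_t, sg->length - offset, buf_len);
	void *vaddr = kmap_atomic(sg_page(sg));

	memcpy(buf, vaddr + sg->offset + offset, count);
	kunmap_atomic(vaddr);

	return count;
}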
@@ -1720,22 +1720,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->name = dev_name(&pdev->dev);

mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
engine->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(engine->regs))
return PTR_ERR(engine->regs);

irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!mem || !irq) {
if (!irq) {
dev_err(&pdev->dev, "no memory/irq resource for engine\n");
return -ENXIO;
}

if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
engine->name))
return -ENOMEM;

engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (!engine->regs) {
dev_err(&pdev->dev, "memory map failed\n");
return -ENOMEM;
}

if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
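This spacc_probe() cleanup, repeated in the s5p-sss and sahara hunks below, is the standard devm_ioremap_resource() conversion: one call requests the memory region, maps it, validates the resource pointer, and reports failure as an ERR_PTR value, replacing the separate request_mem_region()/ioremap() steps and their error messages. The resulting probe snippet is roughly (sketch; the wrapper name is invented):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_mmio(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Handles res == NULL, requests the region and maps it; on failure
	 * the caller just checks IS_ERR() and returns PTR_ERR().
	 */
	return devm_ioremap_resource(&pdev->dev, res);
}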
@@ -568,17 +568,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (s5p_dev)
return -EEXIST;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;

pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;

if (!devm_request_mem_region(dev, res->start,
resource_size(res), pdev->name))
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);

pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {

@@ -589,8 +586,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
clk_enable(pdata->clk);

spin_lock_init(&pdata->lock);
pdata->ioaddr = devm_ioremap(dev, res->start,
resource_size(res));

pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
if (pdata->irq_hash < 0) {
@@ -885,22 +885,9 @@ static int sahara_probe(struct platform_device *pdev)
/* Get the base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get memory region resource\n");
return -ENODEV;
}

if (devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), SAHARA_NAME) == NULL) {
dev_err(&pdev->dev, "failed to request memory region\n");
return -ENOENT;
}
dev->regs_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!dev->regs_base) {
dev_err(&pdev->dev, "failed to ioremap address region\n");
return -ENOENT;
}
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);

/* Get the IRQ */
irq = platform_get_irq(pdev, 0);

@@ -909,10 +896,11 @@ static int sahara_probe(struct platform_device *pdev)
return irq;
}

if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
0, SAHARA_NAME, dev) < 0) {
err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
0, dev_name(&pdev->dev), dev);
if (err) {
dev_err(&pdev->dev, "failed to request irq\n");
return -ENOENT;
return err;
}

/* clocks */
@@ -2637,6 +2637,8 @@ static int talitos_probe(struct platform_device *ofdev)
if (!priv)
return -ENOMEM;

INIT_LIST_HEAD(&priv->alg_list);

dev_set_drvdata(dev, priv);

priv->ofdev = ofdev;

@@ -2657,8 +2659,6 @@ static int talitos_probe(struct platform_device *ofdev)
(unsigned long)dev);
}

INIT_LIST_HEAD(&priv->alg_list);

priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
File diff suppressed because it is too large
@@ -1,103 +0,0 @@
/*
* Copyright (c) 2010, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

#ifndef __CRYPTODEV_TEGRA_AES_H
#define __CRYPTODEV_TEGRA_AES_H

#define TEGRA_AES_ICMDQUE_WR 0x1000
#define TEGRA_AES_CMDQUE_CONTROL 0x1008
#define TEGRA_AES_INTR_STATUS 0x1018
#define TEGRA_AES_INT_ENB 0x1040
#define TEGRA_AES_CONFIG 0x1044
#define TEGRA_AES_IRAM_ACCESS_CFG 0x10A0
#define TEGRA_AES_SECURE_DEST_ADDR 0x1100
#define TEGRA_AES_SECURE_INPUT_SELECT 0x1104
#define TEGRA_AES_SECURE_CONFIG 0x1108
#define TEGRA_AES_SECURE_CONFIG_EXT 0x110C
#define TEGRA_AES_SECURE_SECURITY 0x1110
#define TEGRA_AES_SECURE_HASH_RESULT0 0x1120
#define TEGRA_AES_SECURE_HASH_RESULT1 0x1124
#define TEGRA_AES_SECURE_HASH_RESULT2 0x1128
#define TEGRA_AES_SECURE_HASH_RESULT3 0x112C
#define TEGRA_AES_SECURE_SEC_SEL0 0x1140
#define TEGRA_AES_SECURE_SEC_SEL1 0x1144
#define TEGRA_AES_SECURE_SEC_SEL2 0x1148
#define TEGRA_AES_SECURE_SEC_SEL3 0x114C
#define TEGRA_AES_SECURE_SEC_SEL4 0x1150
#define TEGRA_AES_SECURE_SEC_SEL5 0x1154
#define TEGRA_AES_SECURE_SEC_SEL6 0x1158
#define TEGRA_AES_SECURE_SEC_SEL7 0x115C

/* interrupt status reg masks and shifts */
#define TEGRA_AES_ENGINE_BUSY_FIELD BIT(0)
#define TEGRA_AES_ICQ_EMPTY_FIELD BIT(3)
#define TEGRA_AES_DMA_BUSY_FIELD BIT(23)

/* secure select reg masks and shifts */
#define TEGRA_AES_SECURE_SEL0_KEYREAD_ENB0_FIELD BIT(0)

/* secure config ext masks and shifts */
#define TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD BIT(15)

/* secure config masks and shifts */
#define TEGRA_AES_SECURE_KEY_INDEX_SHIFT 20
#define TEGRA_AES_SECURE_KEY_INDEX_FIELD (0x1F << TEGRA_AES_SECURE_KEY_INDEX_SHIFT)
#define TEGRA_AES_SECURE_BLOCK_CNT_SHIFT 0
#define TEGRA_AES_SECURE_BLOCK_CNT_FIELD (0xFFFFF << TEGRA_AES_SECURE_BLOCK_CNT_SHIFT)

/* stream interface select masks and shifts */
#define TEGRA_AES_CMDQ_CTRL_UCMDQEN_FIELD BIT(0)
#define TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD BIT(1)
#define TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD BIT(4)
#define TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD BIT(5)

/* config register masks and shifts */
#define TEGRA_AES_CONFIG_ENDIAN_ENB_FIELD BIT(10)
#define TEGRA_AES_CONFIG_MODE_SEL_SHIFT 0
#define TEGRA_AES_CONFIG_MODE_SEL_FIELD (0x1F << TEGRA_AES_CONFIG_MODE_SEL_SHIFT)

/* extended config */
#define TEGRA_AES_SECURE_OFFSET_CNT_SHIFT 24
#define TEGRA_AES_SECURE_OFFSET_CNT_FIELD (0xFF << TEGRA_AES_SECURE_OFFSET_CNT_SHIFT)
#define TEGRA_AES_SECURE_KEYSCHED_GEN_FIELD BIT(15)

/* init vector select */
#define TEGRA_AES_SECURE_IV_SELECT_SHIFT 10
#define TEGRA_AES_SECURE_IV_SELECT_FIELD BIT(10)

/* secure engine input */
#define TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT 28
#define TEGRA_AES_SECURE_INPUT_ALG_SEL_FIELD (0xF << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT)
#define TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT 16
#define TEGRA_AES_SECURE_INPUT_KEY_LEN_FIELD (0xFFF << TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT)
#define TEGRA_AES_SECURE_RNG_ENB_FIELD BIT(11)
#define TEGRA_AES_SECURE_CORE_SEL_SHIFT 9
#define TEGRA_AES_SECURE_CORE_SEL_FIELD BIT(9)
#define TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT 7
#define TEGRA_AES_SECURE_VCTRAM_SEL_FIELD (0x3 << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT)
#define TEGRA_AES_SECURE_INPUT_SEL_SHIFT 5
#define TEGRA_AES_SECURE_INPUT_SEL_FIELD (0x3 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT)
#define TEGRA_AES_SECURE_XOR_POS_SHIFT 3
#define TEGRA_AES_SECURE_XOR_POS_FIELD (0x3 << TEGRA_AES_SECURE_XOR_POS_SHIFT)
#define TEGRA_AES_SECURE_HASH_ENB_FIELD BIT(2)
#define TEGRA_AES_SECURE_ON_THE_FLY_FIELD BIT(0)

/* interrupt error mask */
#define TEGRA_AES_INT_ERROR_MASK 0xFFF000

#endif