Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.10:

  API:
   - add skcipher walk interface
   - add asynchronous compression (acomp) interface
   - fix algif_aead AIO handling of zero buffer

  Algorithms:
   - fix unaligned access in poly1305
   - fix DRBG output to large buffers

  Drivers:
   - add support for iMX6UL to caam
   - fix givenc descriptors (used by IPsec) in caam
   - accelerated SHA256/SHA512 for ARM64 from OpenSSL
   - add SSE CRCT10DIF and CRC32 to ARM/ARM64
   - add AEAD support to Chelsio chcr
   - add Armada 8K support to omap-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
  crypto: testmgr - fix overlap in chunked tests again
  crypto: arm/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm/crct10dif - port x86 SSE implementation to ARM
  crypto: arm64/crct10dif - port x86 SSE implementation to arm64
  crypto: testmgr - add/enhance test cases for CRC-T10DIF
  crypto: testmgr - avoid overlap in chunked tests
  crypto: chcr - checking for IS_ERR() instead of NULL
  crypto: caam - check caam_emi_slow instead of re-lookup platform
  crypto: algif_aead - fix AIO handling of zero buffer
  crypto: aes-ce - Make aes_simd_algs static
  crypto: algif_skcipher - set error code when kcalloc fails
  crypto: caam - make caamalg_desc a proper module
  crypto: caam - pass key buffers with typesafe pointers
  crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
  MAINTAINERS: add crypto headers to crypto entry
  crypt: doc - remove misleading mention of async API
  crypto: doc - fix header file name
  crypto: api - fix comment typo
  crypto: skcipher - Add separate walker for AEAD decryption
  ...
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
 					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
 	if (ctx->sa_out == NULL) {
-		dma_free_coherent(ctx->dev->core_dev->device,
-				  ctx->sa_len * 4,
+		dma_free_coherent(ctx->dev->core_dev->device, size * 4,
 				  ctx->sa_in, ctx->sa_in_dma_addr);
 		return -ENOMEM;
 	}
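The fix above illustrates a general rule: an error path must release a buffer with the same size that was just passed to the allocator, not with a stale length field that has not been updated yet. A minimal stand-alone sketch of the bug class follows; a toy malloc/free stands in for dma_alloc_coherent()/dma_free_coherent(), where the size argument at free time must match the one used at allocation time, and all names are illustrative.

    #include <stdlib.h>

    struct toy_ctx {
    	void	*sa_in;
    	size_t	sa_len;		/* stale: updated only after both allocations */
    };

    static int toy_alloc_sa(struct toy_ctx *ctx, size_t size)
    {
    	void *sa_out;

    	ctx->sa_in = malloc(size * 4);
    	if (!ctx->sa_in)
    		return -1;

    	sa_out = malloc(size * 4);
    	if (!sa_out) {
    		/* free exactly what was allocated above: size * 4,
    		 * not ctx->sa_len * 4, which still holds the old value */
    		free(ctx->sa_in);
    		ctx->sa_in = NULL;
    		return -1;
    	}

    	ctx->sa_len = size;	/* only now does sa_len describe the buffers */
    	free(sa_out);
    	free(ctx->sa_in);
    	return 0;
    }

    int main(void)
    {
    	struct toy_ctx ctx = { 0 };
    	return toy_alloc_sa(&ctx, 64);
    }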
|
@@ -28,6 +28,7 @@
 #define AES_MR_OPMOD_CFB	(0x3 << 12)
 #define AES_MR_OPMOD_CTR	(0x4 << 12)
 #define AES_MR_OPMOD_GCM	(0x5 << 12)
+#define AES_MR_OPMOD_XTS	(0x6 << 12)
 #define AES_MR_LOD		(0x1 << 15)
 #define AES_MR_CFBS_MASK	(0x7 << 16)
 #define AES_MR_CFBS_128b	(0x0 << 16)
@@ -67,6 +68,9 @@
 #define AES_CTRR	0x98
 #define AES_GCMHR(x)	(0x9c + ((x) * 0x04))

+#define AES_TWR(x)	(0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x)	(0xd0 + ((x) * 0x04))
+
 #define AES_HW_VERSION	0xFC

 #endif /* __ATMEL_AES_REGS_H__ */
@@ -36,6 +36,7 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/xts.h>
 #include <crypto/internal/aead.h>
 #include <linux/platform_data/crypto-atmel.h>
 #include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
 #define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
 #define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
 #define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS

 #define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
 				 AES_FLAGS_ENCRYPT |		\
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
 	bool			has_cfb64;
 	bool			has_ctr32;
 	bool			has_gcm;
+	bool			has_xts;
 	u32			max_burst_size;
 };

@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
 	atmel_aes_fn_t		ghash_resume;
 };

+struct atmel_aes_xts_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
 struct atmel_aes_reqctx {
 	unsigned long		mode;
 };
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
 		break;

+	case AES_TWR(0):
+	case AES_TWR(1):
+	case AES_TWR(2):
+	case AES_TWR(3):
+		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+		break;
+
+	case AES_ALPHAR(0):
+	case AES_ALPHAR(1):
+	case AES_ALPHAR(2):
+	case AES_ALPHAR(3):
+		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+		break;
+
 	default:
 		snprintf(tmp, sz, "0x%02x", offset);
 		break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 	char tmp[16];

 	dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
-		 atmel_aes_reg_name(offset, tmp));
+		 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 }
 #endif /* VERBOSE_DEBUG */

@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 	return err;
 }

-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
-				 const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+				     const u32 *iv, const u32 *key, int keylen)
 {
 	u32 valmr = 0;

 	/* MR register must be set before IV registers */
-	if (dd->ctx->keylen == AES_KEYSIZE_128)
+	if (keylen == AES_KEYSIZE_128)
 		valmr |= AES_MR_KEYSIZE_128;
-	else if (dd->ctx->keylen == AES_KEYSIZE_192)
+	else if (keylen == AES_KEYSIZE_192)
 		valmr |= AES_MR_KEYSIZE_192;
 	else
 		valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,

 	atmel_aes_write(dd, AES_MR, valmr);

-	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
-			  SIZE_IN_WORDS(dd->ctx->keylen));
+	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));

 	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 		atmel_aes_write_block(dd, AES_IVR(0), iv);
 }

+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+					const u32 *iv)
+
+{
+	atmel_aes_write_ctrl_key(dd, use_dma, iv,
+				 dd->ctx->key, dd->ctx->keylen);
+}

 /* CPU transfer */

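The write_ctrl refactoring above is a common pattern: the existing entry point keeps its signature and becomes a thin wrapper around a new variant that takes the extra parameters (here, the key material) explicitly, so existing callers stay untouched while new callers such as the XTS tweak computation can supply a different key. A stand-alone sketch with illustrative names, not part of this patch:

    #include <stdio.h>

    struct dev { const unsigned char *key; int keylen; };

    /* New, more general helper: key material is passed explicitly. */
    static void write_ctrl_key(struct dev *d, const unsigned char *key, int keylen)
    {
    	(void)d; (void)key;
    	printf("programming %d-byte key\n", keylen);
    }

    /* Old entry point preserved as a wrapper; existing callers compile as-is. */
    static inline void write_ctrl(struct dev *d)
    {
    	write_ctrl_key(d, d->key, d->keylen);
    }

    int main(void)
    {
    	struct dev d = { (const unsigned char *)"0123456789abcdef", 16 };
    	write_ctrl(&d);			/* legacy path: context key */
    	write_ctrl_key(&d, d.key, 16);	/* new path: caller-supplied key */
    	return 0;
    }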
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
 };


+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	unsigned long flags;
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	/* Compute the tweak value from req->info with ecb(aes). */
+	flags = dd->flags;
+	dd->flags &= ~AES_FLAGS_MODE_MASK;
+	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+	atmel_aes_write_ctrl_key(dd, false, NULL,
+				 ctx->key2, ctx->base.keylen);
+	dd->flags = flags;
+
+	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+	static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+	u8 *tweak_bytes = (u8 *)tweak;
+	int i;
+
+	/* Read the computed ciphered tweak value. */
+	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+	/*
+	 * Hardware quirk:
+	 * the order of the ciphered tweak bytes need to be reversed before
+	 * writing them into the ODATARx registers.
+	 */
+	for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+		u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+		tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+		tweak_bytes[i] = tmp;
+	}
+
+	/* Process the data. */
+	atmel_aes_write_ctrl(dd, use_dma, NULL);
+	atmel_aes_write_block(dd, AES_TWR(0), tweak);
+	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+	if (use_dma)
+		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+					   atmel_aes_transfer_complete);
+
+	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+				   atmel_aes_transfer_complete);
+}
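The loop above is a plain in-place byte reversal: it swaps pairs working inward from both ends of the 16-byte block, which is why it iterates only over half the block. The same idiom isolated as a runnable sketch:

    #include <stdio.h>

    #define BLOCK 16

    /* Reverse the byte order of a 16-byte block in place. */
    static void reverse_block(unsigned char *b)
    {
    	for (int i = 0; i < BLOCK / 2; ++i) {
    		unsigned char tmp = b[BLOCK - 1 - i];

    		b[BLOCK - 1 - i] = b[i];
    		b[i] = tmp;
    	}
    }

    int main(void)
    {
    	unsigned char blk[BLOCK];

    	for (int i = 0; i < BLOCK; ++i)
    		blk[i] = (unsigned char)i;
    	reverse_block(blk);
    	for (int i = 0; i < BLOCK; ++i)
    		printf("%02x ", blk[i]);	/* prints 0f 0e ... 00 */
    	printf("\n");
    	return 0;
    }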
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int err;
+
+	err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+	if (err)
+		return err;
+
+	memcpy(ctx->base.key, key, keylen/2);
+	memcpy(ctx->key2, key + keylen/2, keylen/2);
+	ctx->base.keylen = keylen/2;
+
+	return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_xts_start;
+
+	return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "atmel-xts-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_xts_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_xts_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_xts_setkey,
+		.encrypt	= atmel_aes_xts_encrypt,
+		.decrypt	= atmel_aes_xts_decrypt,
+	}
+};
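The setkey above follows the XTS convention: the caller supplies a double-length key (hence the 2 * AES_MIN_KEY_SIZE / 2 * AES_MAX_KEY_SIZE bounds), whose first half drives the data cipher and whose second half encrypts the tweak. A minimal stand-alone sketch of the split; in the kernel, xts_check_key() additionally rejects invalid lengths and, in FIPS mode, identical halves:

    #include <stdio.h>
    #include <string.h>

    static int xts_split_key(const unsigned char *key, unsigned int keylen,
    			 unsigned char *key1, unsigned char *key2)
    {
    	if (keylen % 2)
    		return -1;		/* must split into two equal halves */
    	memcpy(key1, key, keylen / 2);
    	memcpy(key2, key + keylen / 2, keylen / 2);
    	return 0;
    }

    int main(void)
    {
    	unsigned char key[64], k1[32], k2[32];

    	memset(key, 0xaa, 32);		/* data-cipher half */
    	memset(key + 32, 0x55, 32);	/* tweak-cipher half */
    	if (xts_split_key(key, sizeof(key), k1, k2) == 0)
    		printf("key1[0]=%02x key2[0]=%02x\n", k1[0], k2[0]);
    	return 0;
    }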

 /* Probe functions */

 static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 {
 	int i;

+	if (dd->caps.has_xts)
+		crypto_unregister_alg(&aes_xts_alg);
+
 	if (dd->caps.has_gcm)
 		crypto_unregister_aead(&aes_gcm_alg);

@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
 			goto err_aes_gcm_alg;
 	}

+	if (dd->caps.has_xts) {
+		err = crypto_register_alg(&aes_xts_alg);
+		if (err)
+			goto err_aes_xts_alg;
+	}
+
 	return 0;

+err_aes_xts_alg:
+	crypto_unregister_aead(&aes_gcm_alg);
 err_aes_gcm_alg:
 	crypto_unregister_alg(&aes_cfb64_alg);
 err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 	dd->caps.has_cfb64 = 0;
 	dd->caps.has_ctr32 = 0;
 	dd->caps.has_gcm = 0;
+	dd->caps.has_xts = 0;
 	dd->caps.max_burst_size = 1;

 	/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 		dd->caps.has_cfb64 = 1;
 		dd->caps.has_ctr32 = 1;
 		dd->caps.has_gcm = 1;
+		dd->caps.has_xts = 1;
 		dd->caps.max_burst_size = 4;
 		break;
 	case 0x200:
@@ -2138,7 +2311,7 @@ aes_dd_err:

 static int atmel_aes_remove(struct platform_device *pdev)
 {
-	static struct atmel_aes_dev *aes_dd;
+	struct atmel_aes_dev *aes_dd;

 	aes_dd = platform_get_drvdata(pdev);
 	if (!aes_dd)
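The one-word fix in atmel_aes_remove() above drops a stray `static` on a function-local variable. A function-local `static` has a single instance of program-lifetime storage shared by every invocation, which is the wrong model for per-device state even when, as here, the variable is reassigned on each call. A stand-alone sketch of the behavior, not part of the patch:

    #include <stdio.h>

    static int next_id(void)
    {
    	static int counter;	/* one instance for the whole program */

    	return ++counter;	/* value persists across calls */
    }

    int main(void)
    {
    	printf("%d %d %d\n", next_id(), next_id(), next_id());	/* 1 2 3 */
    	return 0;
    }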
@@ -74,7 +74,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD

 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	tristate "Register algorithm implementations with the Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API

 config CRYPTO_DEV_FSL_CAAM_AHASH_API
 	tristate "Register hash algorithm implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_HASH
 	help
@@ -101,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API

 config CRYPTO_DEV_FSL_CAAM_PKC_API
 	tristate "Register public key cryptography implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RSA
 	help
@@ -113,7 +113,7 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API

 config CRYPTO_DEV_FSL_CAAM_RNG_API
 	tristate "Register caam device for hwrng API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RNG
 	select HW_RANDOM
@@ -134,3 +134,6 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
 	help
 	  Selecting this will enable printing of various debug
 	  information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
@@ -8,6 +8,7 @@ endif
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
 drivers/crypto/caam/caamalg_desc.c | 1306 lines (new file; diff suppressed because it is too large)
 drivers/crypto/caam/caamalg_desc.h |   97 lines (new file)
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int icvsize,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
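The DESC_*_LEN constants above are byte budgets: each is a base command count plus the number of 32-bit command words a given shared descriptor needs, scaled by CAAM_CMD_SZ. A stand-alone sketch of how these budgets translate into buffer sizes (CAAM_CMD_SZ redefined locally for illustration; in the driver it comes from desc.h):

    #include <stdio.h>

    #define CAAM_CMD_SZ		sizeof(unsigned int)	/* one command word */
    #define DESC_GCM_BASE		(3 * CAAM_CMD_SZ)
    #define DESC_GCM_ENC_LEN	(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
    #define DESC_GCM_DEC_LEN	(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

    int main(void)
    {
    	/* A driver sizes its shared-descriptor buffers from these constants. */
    	printf("GCM encap descriptor: %zu bytes\n", (size_t)DESC_GCM_ENC_LEN);
    	printf("GCM decap descriptor: %zu bytes\n", (size_t)DESC_GCM_DEC_LEN);
    	return 0;
    }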
@@ -72,7 +72,7 @@
 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

 /* length of descriptors text */
-#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
 #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
-	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
-	dma_addr_t sh_desc_finup_dma;
 	struct device *jrdev;
-	u32 alg_type;
-	u32 alg_op;
 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	dma_addr_t key_dma;
 	int ctx_len;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	struct alginfo adata;
 };

 /* ahash state */
@@ -222,118 +217,66 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
 	return 0;
 }

-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+/*
+ * For ahash update, final and finup (import_ctx = true)
+ *     import context, read and write to seqout
+ * For ahash firsts and digest (import_ctx = false)
+ *     read and write to seqout
+ */
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+				     struct caam_hash_ctx *ctx, bool import_ctx)
 {
-	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-			  ctx->split_key_len, CLASS_2 |
-			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
-	u32 *key_jump_cmd;
+	u32 op = ctx->adata.algtype;
+	u32 *skip_key_load;

 	init_sh_desc(desc, HDR_SHARE_SERIAL);

-	if (ctx->split_key_len) {
-		/* Skip if already shared */
-		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-					   JUMP_COND_SHRD);
+	/* Append key if it has been set; ahash update excluded */
+	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+		/* Skip key loading if already shared */
+		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_SHRD);

-		append_key_ahash(desc, ctx);
+		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+				  ctx->adata.keylen, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

-		set_jump_tgt_here(desc, key_jump_cmd);
+		set_jump_tgt_here(desc, skip_key_load);
+
+		op |= OP_ALG_AAI_HMAC_PRECOMP;
 	}

-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
+	/* If needed, import context from software */
+	if (import_ctx)
+		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);

-/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
- */
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
-{
-	/* Calculate remaining bytes to read */
+	/* Class 2 operation */
+	append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
 	/* Read remaining bytes */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
 			     FIFOLD_TYPE_MSG | KEY_VLF);
-
 	/* Store class2 context bytes */
 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
 }

-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
-					 int digestsize,
-					 struct caam_hash_ctx *ctx)
-{
-	init_sh_desc_key_ahash(desc, ctx);
-
-	/* Import context from software */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_2_CCB | ctx->ctx_len);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 */
-	ahash_append_load_str(desc, digestsize);
-}
-
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
-				     int digestsize, struct caam_hash_ctx *ctx)
-{
-	init_sh_desc_key_ahash(desc, ctx);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 */
-	ahash_append_load_str(desc, digestsize);
-}
-
 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct device *jrdev = ctx->jrdev;
-	u32 have_key = 0;
 	u32 *desc;

-	if (ctx->split_key_len)
-		have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
 	/* ahash_update shared descriptor */
 	desc = ctx->sh_desc_update;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Import context from software */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_2_CCB | ctx->ctx_len);
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
-			 OP_ALG_ENCRYPT);
-
-	/* Load data and write to result or context */
-	ahash_append_load_str(desc, ctx->ctx_len);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
 	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 						 DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
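The hunk above is a classic consolidation: several near-identical descriptor builders collapse into one generator, ahash_gen_sh_desc(), parameterized by the hash state and an import_ctx flag that says whether the running context must first be loaded. A stand-alone sketch of the pattern with illustrative names, not part of the patch:

    #include <stdbool.h>
    #include <stdio.h>

    static void gen_desc(const char *state, bool import_ctx)
    {
    	if (import_ctx)
    		printf("[%s] import running context\n", state);
    	printf("[%s] hash operation + store result\n", state);
    }

    int main(void)
    {
    	gen_desc("update", true);	/* replaced one dedicated builder */
    	gen_desc("init",   false);	/* replaced another */
    	gen_desc("final",  true);
    	gen_desc("digest", false);
    	return 0;
    }

The same consolidation is what lets the finup path reuse the final descriptor, which is why the finup buffer and its DMA handle disappear from struct caam_hash_ctx above.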
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
-
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
-			  ctx->ctx_len, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
 	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
 						       desc_bytes(desc),
 						       DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
-
-	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
-			      OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
 	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		       desc_bytes(desc), 1);
 #endif

-	/* ahash_finup shared descriptor */
-	desc = ctx->sh_desc_finup;
-
-	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
-			      OP_ALG_AS_FINALIZE, digestsize, ctx);
-
-	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-						DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
-
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
-
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
-			  digestsize, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
 	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
 						 desc_bytes(desc),
 						 DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	return 0;
 }

-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			      u32 keylen)
-{
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			     ctx->split_key_pad_len, key_in, keylen,
-			     ctx->alg_op);
-}
-
 /* Digest hash size if it is too large */
 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			   u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	}

 	/* Job descriptor to perform unkeyed hash on key_in */
-	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
 			 OP_ALG_AS_INITFINAL);
 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 static int ahash_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
 {
-	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct device *jrdev = ctx->jrdev;
 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 		key = hashed_key;
 	}

-	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
 #ifdef DEBUG
-	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif

-	ret = gen_split_hash_key(ctx, key, keylen);
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+			    CAAM_MAX_HASH_KEY_SIZE);
 	if (ret)
 		goto bad_free_key;

-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len, 1);
+		       ctx->adata.keylen_pad, 1);
 #endif

 	ret = ahash_set_sh_desc(ahash);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
 				 DMA_TO_DEVICE);
 	}

 error_free_key:
 	kfree(hashed_key);
 	return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif

-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);

@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif

-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);

@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif

-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);

@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif

-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);

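All four completion callbacks above swap open-coded offsetof() pointer arithmetic for container_of(), which recovers the enclosing structure from a pointer to one of its members with the same result but with type checking and self-documenting intent. A runnable stand-alone sketch (container_of defined locally; in the kernel it comes from linux/kernel.h):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct edesc {
    	int		dst_nents;
    	unsigned int	hw_desc[16];
    };

    int main(void)
    {
    	struct edesc e = { .dst_nents = 3 };
    	unsigned int *desc = &e.hw_desc[0];	/* what a callback receives */

    	struct edesc *back = container_of(desc, struct edesc, hw_desc[0]);
    	printf("dst_nents = %d\n", back->dst_nents);	/* prints 3 */
    	return 0;
    }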
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)

 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
 				  flags);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
 	unsigned int blocksize;
 	struct ahash_alg template_ahash;
 	u32 alg_type;
-	u32 alg_op;
 };

 /* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA1,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha224",
 		.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA224,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha256",
 		.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA256,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha384",
 		.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA384,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha512",
 		.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA512,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "md5",
 		.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_MD5,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 	},
 };

 struct caam_hash_alg {
 	struct list_head entry;
 	int alg_type;
-	int alg_op;
 	struct ahash_alg ahash_alg;
 };

@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 		return PTR_ERR(ctx->jrdev);
 	}
 	/* copy descriptor header template value */
-	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

-	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+	ctx->ctx_len = runninglen[(ctx->adata.algtype &
+				   OP_ALG_ALGSEL_SUBMASK) >>
 				  OP_ALG_ALGSEL_SHIFT];

 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
 				 desc_bytes(ctx->sh_desc_digest),
 				 DMA_TO_DEVICE);
-	if (ctx->sh_desc_finup_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
-				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

 	caam_jr_free(ctx->jrdev);
 }
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
 	alg->cra_type = &crypto_ahash_type;

 	t_alg->alg_type = template->alg_type;
-	t_alg->alg_op = template->alg_op;

 	return t_alg;
 }
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 				unsigned int keylen)
 {
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-	struct rsa_key raw_key = {0};
+	struct rsa_key raw_key = {NULL};
 	struct caam_rsa_key *rsa_key = &ctx->key;
 	int ret;

@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 				 unsigned int keylen)
 {
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-	struct rsa_key raw_key = {0};
+	struct rsa_key raw_key = {NULL};
 	struct caam_rsa_key *rsa_key = &ctx->key;
 	int ret;

@@ -52,7 +52,7 @@

 /* length of descriptors */
 #define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN			(4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN			(3 * CAAM_CMD_SZ)

 /* Buffer, its dma address and lock */
 struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 {
 	struct buf_data *bd;

-	bd = (struct buf_data *)((char *)desc -
-	      offsetof(struct buf_data, hw_desc));
+	bd = container_of(desc, struct buf_data, hw_desc[0]);

 	if (err)
 		caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)

 	init_sh_desc(desc, HDR_SHARE_SERIAL);

-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
 	/* Generate random bytes */
 	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(dev);
 	}
-	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
 	if (!rng_ctx) {
 		err = -ENOMEM;
 		goto free_caam_alloc;
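The one-line fix above reflects how GFP masks compose: GFP_DMA is only a zone modifier and says nothing about whether the allocation may sleep or reclaim, so it must be OR-ed with a policy such as GFP_KERNEL. A stand-alone sketch of the flag composition, with purely illustrative bit values, not the kernel's:

    #include <stdio.h>

    #define GFP_KERNEL	0x01u	/* policy: may sleep / reclaim */
    #define GFP_DMA	0x10u	/* zone modifier only */

    int main(void)
    {
    	unsigned int flags = GFP_DMA | GFP_KERNEL;

    	printf("flags=0x%02x (zone=%s, policy=%s)\n", flags,
    	       (flags & GFP_DMA) ? "DMA" : "normal",
    	       (flags & GFP_KERNEL) ? "kernel" : "none");
    	return 0;
    }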
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
 	clk_disable_unprepare(ctrlpriv->caam_ipg);
 	clk_disable_unprepare(ctrlpriv->caam_mem);
 	clk_disable_unprepare(ctrlpriv->caam_aclk);
-	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
 	return 0;
 }

@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 	 */
 	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
 	      >> RTSDCTL_ENT_DLY_SHIFT;
-	if (ent_delay <= val) {
-		/* put RNG4 into run mode */
-		clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
-		return;
-	}
+	if (ent_delay <= val)
+		goto start_rng;

 	val = rd_reg32(&r4tst->rtsdctl);
 	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
-	/* read the control register */
-	val = rd_reg32(&r4tst->rtmctl);
+start_rng:
 	/*
 	 * select raw sampling in both entropy shifter
-	 * and statistical checker
+	 * and statistical checker; put RNG4 into run mode
 	 */
-	clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
-	/* put RNG4 into run mode */
-	clrsetbits_32(&val, RTMCTL_PRGM, 0);
-	/* write back the control register */
-	wr_reg32(&r4tst->rtmctl, val);
+	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
 }

 /**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
 	}
 	ctrlpriv->caam_aclk = clk;

-	clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev,
-			"can't identify CAAM emi_slow clk: %d\n", ret);
-		return ret;
+	if (!of_machine_is_compatible("fsl,imx6ul")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM emi_slow clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_emi_slow = clk;
 	}
-	ctrlpriv->caam_emi_slow = clk;

 	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
 	if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
 		goto disable_caam_mem;
 	}

-	ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
-			ret);
-		goto disable_caam_aclk;
+	if (ctrlpriv->caam_emi_slow) {
+		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+				ret);
+			goto disable_caam_aclk;
+		}
 	}

 	/* Get configuration properties from device tree */
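The i.MX6UL hunks above implement the optional-resource pattern: the clock handle stays NULL on SoCs that lack it, and every later use (enable, disable, error unwinding) is guarded by a NULL check instead of re-querying the platform. A stand-alone sketch of the pattern with illustrative names:

    #include <stddef.h>
    #include <stdio.h>

    struct clk { const char *name; };

    static void clk_enable(struct clk *c)  { printf("enable %s\n", c->name); }
    static void clk_disable(struct clk *c) { printf("disable %s\n", c->name); }

    int main(void)
    {
    	struct clk emi = { "emi_slow" };
    	struct clk *emi_slow = NULL;	/* stays NULL on SoCs without it */
    	int soc_has_emi_slow = 1;	/* pretend: not an i.MX6UL */

    	if (soc_has_emi_slow)
    		emi_slow = &emi;	/* acquired only when present */

    	if (emi_slow)
    		clk_enable(emi_slow);
    	/* ... use the device ... */
    	if (emi_slow)
    		clk_disable(emi_slow);
    	return 0;
    }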
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
 	else
 		BLOCK_OFFSET = PG_SIZE_64K;

-	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
-	ctrlpriv->assure = (struct caam_assurance __force *)
-			   ((uint8_t *)ctrl +
+	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+			   ((__force uint8_t *)ctrl +
 			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
 			   );
-	ctrlpriv->deco = (struct caam_deco __force *)
-			 ((uint8_t *)ctrl +
+	ctrlpriv->deco = (struct caam_deco __iomem __force *)
+			 ((__force uint8_t *)ctrl +
 			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
 			 );

@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
 					ring);
 			continue;
 		}
-		ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
-				     ((uint8_t *)ctrl +
+		ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+				     ((__force uint8_t *)ctrl +
 				     (ring + JR_BLOCK_NUMBER) *
 				      BLOCK_OFFSET
 				     );
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
 			!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
 			   CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present) {
-		ctrlpriv->qi = (struct caam_queue_if __force *)
-			       ((uint8_t *)ctrl +
+		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+			       ((__force uint8_t *)ctrl +
 				 BLOCK_OFFSET * QI_BLOCK_NUMBER
 			       );
 		/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
 			    &caam_fops_u32_ro);

 	/* Internal covering keys (useful in non-secure mode only) */
-	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
 	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
 						S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
 						ctrlpriv->ctl,
 						&ctrlpriv->ctl_kek_wrap);

-	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
 	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
 						 S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
 						 ctrlpriv->ctl,
 						 &ctrlpriv->ctl_tkek_wrap);

-	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
 	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
 						 S_IRUSR |
@@ -833,7 +831,8 @@ caam_remove:
 iounmap_ctrl:
 	iounmap(ctrl);
 disable_caam_emi_slow:
-	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
 disable_caam_aclk:
 	clk_disable_unprepare(ctrlpriv->caam_aclk);
 disable_caam_mem:
@@ -22,12 +22,6 @@
 #define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
 #define SEC4_SG_OFFSET_MASK	0x00001fff

-struct sec4_sg_entry {
-	u64 ptr;
-	u32 len;
-	u32 bpid_offset;
-};
-
 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
 #define MAX_CAAM_DESCSIZE	64

@@ -90,8 +84,8 @@
 #define HDR_ZRO			0x00008000

 /* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK	0x3f
 #define HDR_START_IDX_SHIFT	16
+#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)

 /* If shared descriptor header, 6-bit length */
 #define HDR_DESCLEN_SHR_MASK	0x3f
@@ -121,10 +115,10 @@
 #define HDR_PROP_DNR		0x00000800

 /* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK	0x03
 #define HDR_SD_SHARE_SHIFT	8
-#define HDR_JD_SHARE_MASK	0x07
+#define HDR_SD_SHARE_MASK	(0x03 << HDR_SD_SHARE_SHIFT)
 #define HDR_JD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	(0x07 << HDR_JD_SHARE_SHIFT)

 #define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
 #define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@
 #define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ	(0x0c << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@
 #define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@
 /* For non-protocol/alg-only op commands */
 #define OP_ALG_TYPE_SHIFT	24
 #define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1	2
-#define OP_ALG_TYPE_CLASS2	4
+#define OP_ALG_TYPE_CLASS1	(2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2	(4 << OP_ALG_TYPE_SHIFT)

 #define OP_ALG_ALGSEL_SHIFT	16
 #define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@
 #define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f

 /* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT	13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	17
 #define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
 #define OP_ALG_PKMODE_DST_REG_SHIFT	10
 #define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
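Several of the desc.h fixes above are the same bug class: a field mask defined at bit 0 while the field itself lives at a shifted position, so masking a packed word extracts the wrong bits. Moving the shift into the mask definition makes `word & MASK` select the field in place. A runnable stand-alone demonstration:

    #include <stdio.h>

    #define SHIFT		16
    #define MASK_WRONG	0x3f			/* overlaps an unrelated field */
    #define MASK_RIGHT	(0x3f << SHIFT)

    int main(void)
    {
    	/* field value 0x2a at bit 16, unrelated low bits 0x15 */
    	unsigned int hdr = (0x2a << SHIFT) | 0x15;

    	printf("wrong: 0x%02x\n", hdr & MASK_WRONG);		/* 0x15, bogus */
    	printf("right: 0x%02x\n", (hdr & MASK_RIGHT) >> SHIFT);	/* 0x2a */
    	return 0;
    }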
@@ -33,38 +33,39 @@

 extern bool caam_little_end;

-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
 {
 	return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
 }

-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
 {
 	return desc_len(desc) * CAAM_CMD_SZ;
 }

-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
 {
 	return desc + desc_len(desc);
 }

-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
 {
 	return desc + 1;
 }

-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
 {
 	*desc = cpu_to_caam32((options | HDR_ONE) + 1);
 }

-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
 {
 	PRINT_POS;
 	init_desc(desc, CMD_SHARED_DESC_HDR | options);
 }

-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+				    size_t pdb_bytes)
 {
 	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;

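The constification sweep above changes `u32 *desc` to `u32 * const desc`: a const pointer to mutable data, not a pointer to const data. The helpers may still write descriptor words through the pointer, but the parameter itself cannot be reseated, which documents intent and lets the compiler reject accidental reassignment. A runnable stand-alone sketch:

    #include <stdio.h>

    typedef unsigned int u32;

    static void init_desc(u32 * const desc, u32 options)
    {
    	*desc = options | 1;	/* writing through the pointer is fine */
    	/* desc = NULL;		   would not compile: desc itself is const */
    }

    int main(void)
    {
    	u32 buf[4] = { 0 };

    	init_desc(buf, 0x40u);
    	printf("header word: 0x%x\n", buf[0]);
    	return 0;
    }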
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
|
||||
options);
|
||||
}
|
||||
|
||||
static inline void init_job_desc(u32 *desc, u32 options)
|
||||
static inline void init_job_desc(u32 * const desc, u32 options)
|
||||
{
|
||||
init_desc(desc, CMD_DESC_HDR | options);
|
||||
}
|
||||
|
||||
static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
|
||||
static inline void init_job_desc_pdb(u32 * const desc, u32 options,
|
||||
size_t pdb_bytes)
|
||||
{
|
||||
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
|
||||
|
||||
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
|
||||
}
|
||||
|
||||
static inline void append_ptr(u32 *desc, dma_addr_t ptr)
|
||||
static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
|
||||
{
|
||||
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
|
||||
|
||||
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
|
||||
CAAM_PTR_SZ / CAAM_CMD_SZ);
|
||||
}
|
||||
|
||||
static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
|
||||
u32 options)
|
||||
static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
|
||||
int len, u32 options)
|
||||
{
|
||||
PRINT_POS;
|
||||
init_job_desc(desc, HDR_SHARED | options |
|
||||
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
|
||||
append_ptr(desc, ptr);
|
||||
}
|
||||
|
||||
static inline void append_data(u32 *desc, void *data, int len)
|
||||
static inline void append_data(u32 * const desc, void *data, int len)
|
||||
{
|
||||
u32 *offset = desc_end(desc);
|
||||
|
||||
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
|
||||
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
|
||||
}
|
||||
|
||||
static inline void append_cmd(u32 *desc, u32 command)
|
||||
static inline void append_cmd(u32 * const desc, u32 command)
|
||||
{
|
||||
u32 *cmd = desc_end(desc);
|
||||
|
||||
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
|
||||
|
||||
#define append_u32 append_cmd
|
||||
|
||||
static inline void append_u64(u32 *desc, u64 data)
|
||||
static inline void append_u64(u32 * const desc, u64 data)
|
||||
{
|
||||
u32 *offset = desc_end(desc);
|
||||
|
||||
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
|
||||
}
|
||||
|
||||
/* Write command without affecting header, and return pointer to next word */
|
||||
static inline u32 *write_cmd(u32 *desc, u32 command)
|
||||
static inline u32 *write_cmd(u32 * const desc, u32 command)
|
||||
{
|
||||
*desc = cpu_to_caam32(command);
|
||||
|
||||
return desc + 1;
|
||||
}
|
||||
|
||||
static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
|
||||
static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
|
||||
u32 command)
|
||||
{
|
||||
append_cmd(desc, command | len);
|
||||
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}

/* Write length after pointer, rather than inside command */
static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
					 unsigned int len, u32 command)
{
	append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
	append_cmd(desc, len);
}

static inline void append_cmd_data(u32 *desc, void *data, int len,
static inline void append_cmd_data(u32 * const desc, void *data, int len,
				   u32 command)
{
	append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}

#define APPEND_CMD_RET(cmd, op) \
static inline u32 *append_##cmd(u32 *desc, u32 options) \
static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
	u32 *cmd = desc_end(desc); \
	PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)

static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
	*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
				  (desc_len(desc) - (jump_cmd - desc)));
}

static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
	u32 val = caam32_to_cpu(*move_cmd);

@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}

#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
	PRINT_POS; \
	append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)

#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
static inline void append_##cmd(u32 * const desc, unsigned int len, \
				u32 options) \
{ \
	PRINT_POS; \
	append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)

#define APPEND_CMD_PTR(cmd, op) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
				u32 options) \
static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
				unsigned int len, u32 options) \
{ \
	PRINT_POS; \
	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)

static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
				u32 options)
static inline void append_store(u32 * const desc, dma_addr_t ptr,
				unsigned int len, u32 options)
{
	u32 cmd_src;

@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}

#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
						 dma_addr_t ptr, \
						 unsigned int len, \
						 u32 options) \
{ \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)

#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
					 unsigned int len, u32 options) \
{ \
	PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);

#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
					 unsigned int len, u32 options) \
{ \
	PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
 * the size of its type
 */
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
				type len, u32 options) \
{ \
	PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
 * from length of immediate data provided, e.g., split keys
 */
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
					 unsigned int data_len, \
					 unsigned int len, u32 options) \
{ \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);

#define APPEND_CMD_RAW_IMM(cmd, op, type) \
static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
					     u32 options) \
{ \
	PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)

/**
 * struct alginfo - Container for algorithm details
 * @algtype: algorithm selector; for valid values, see documentation of the
 *           functions where it is used.
 * @keylen: length of the provided algorithm key, in bytes
 * @keylen_pad: padded length of the provided algorithm key, in bytes
 * @key: address where algorithm key resides; virtual address if key_inline
 *       is true, dma (bus) address if key_inline is false.
 * @key_inline: true - key can be inlined in the descriptor; false - key is
 *              referenced by the descriptor
 */
struct alginfo {
	u32 algtype;
	unsigned int keylen;
	unsigned int keylen_pad;
	union {
		dma_addr_t key_dma;
		void *key_virt;
	};
	bool key_inline;
};
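
A hedged illustration of how a caller might populate this structure once it has decided the key fits inline; the values and the split_key_buf pointer are hypothetical, not from this patch (SHA-256's MDHA split key is 64 bytes, already 16-byte aligned):

	/* Hypothetical example only: describe an inlined SHA-256 split key. */
	struct alginfo adata = {
		.algtype    = OP_ALG_ALGSEL_SHA256, /* selector used by cnstr_* funcs */
		.keylen     = 64,                   /* split key length, in bytes */
		.keylen_pad = 64,                   /* already 16-byte aligned */
		.key_virt   = split_key_buf,        /* virtual address, because... */
		.key_inline = true,                 /* ...the key goes in the descriptor */
	};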

/**
 * desc_inline_query() - Provide indications on which data items can be inlined
 *                       and which shall be referenced in a shared descriptor.
 * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
 *               excluding the data items to be inlined (or corresponding
 *               pointer if an item is not inlined). Each cnstr_* function that
 *               generates descriptors should have a define mentioning
 *               corresponding length.
 * @jd_len: Maximum length of the job descriptor(s) that will be used
 *          together with the shared descriptor.
 * @data_len: Array of lengths of the data items trying to be inlined
 * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
 *            otherwise.
 * @count: Number of data items (size of @data_len array); must be <= 32
 *
 * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
 *         check @inl_mask for details.
 */
static inline int desc_inline_query(unsigned int sd_base_len,
				    unsigned int jd_len, unsigned int *data_len,
				    u32 *inl_mask, unsigned int count)
{
	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
	unsigned int i;

	*inl_mask = 0;
	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
		if (rem_bytes - (int)(data_len[i] +
			(count - i - 1) * CAAM_PTR_SZ) >= 0) {
			rem_bytes -= data_len[i];
			*inl_mask |= (1 << i);
		} else {
			rem_bytes -= CAAM_PTR_SZ;
		}
	}

	return (rem_bytes >= 0) ? 0 : -1;
}
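
A minimal caller sketch (sd_base_len, jd_len, adata and cdata are hypothetical placeholders): bit i of inl_mask answers "can data item i be inlined?", so a typical pattern is to feed the candidate lengths in and let the mask drive key_inline:

	/* Hypothetical: can an auth split key and a cipher key both be inlined? */
	unsigned int data_len[2] = { adata.keylen_pad, cdata.keylen };
	u32 inl_mask;

	if (desc_inline_query(sd_base_len, jd_len, data_len, &inl_mask, 2) < 0)
		return -EINVAL;	/* too big even with pointers substituted */

	adata.key_inline = !!(inl_mask & BIT(0));
	cdata.key_inline = !!(inl_mask & BIT(1));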

@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
		   strlen(rng_err_id_list[err_id])) {
		/* RNG-only error */
		err_str = rng_err_id_list[err_id];
	} else if (err_id < ARRAY_SIZE(err_id_list))
	} else {
		err_str = err_id_list[err_id];
	else
		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
	}

	/*
	 * CCB ICV check failures are part of normal operation life;

@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
	struct device *dev;
	int ridx;
	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
	struct tasklet_struct irqtask;
	int irq;			/* One per queue */

	/* Number of scatterlist crypt transforms active on the JobR */

@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)

	/*
	 * Check the output ring for ready responses, kick
	 * the threaded irq if jobs done.
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	return IRQ_WAKE_THREAD;
	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = st_dev;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return IRQ_HANDLED;
}

/**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
				     caam_jr_threadirq, IRQF_SHARED,
				     dev_name(dev), dev);
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ out_free_inpring:
out_free_irq:
	free_irq(jrp->irq, dev);
out_kill_deq:
	tasklet_kill(&jrp->irqtask);
	return error;
}

@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	if (sizeof(dma_addr_t) == sizeof(u64))
		if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))

@@ -10,6 +10,36 @@
#include "desc_constr.h"
#include "key_gen.h"

/**
 * split_key_len - Compute MDHA split key length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 *        SHA224, SHA384, SHA512.
 *
 * Return: MDHA split key length
 */
static inline u32 split_key_len(u32 hash)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	u32 idx;

	idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;

	return (u32)(mdpadlen[idx] * 2);
}

/**
 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 *        SHA224, SHA384, SHA512.
 *
 * Return: MDHA split key pad length
 */
static inline u32 split_key_pad_len(u32 hash)
{
	return ALIGN(split_key_len(hash), 16);
}
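
For intuition, the numbers these helpers produce for SHA-1 (index 1 in the mdpadlen table above, pad size 20 bytes); the calls below are illustrative only:

	u32 len = split_key_len(OP_ALG_ALGSEL_SHA1);     /* 2 * 20 = 40 bytes */
	u32 pad = split_key_pad_len(OP_ALG_ALGSEL_SHA1); /* ALIGN(40, 16) = 48 */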

void split_key_done(struct device *dev, u32 *desc, u32 err,
		    void *context)
{
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
			@0xffe04000
*/
int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
		  int split_key_pad_len, const u8 *key_in, u32 keylen,
		  u32 alg_op)
int gen_split_key(struct device *jrdev, u8 *key_out,
		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
		  int max_keylen)
{
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = -ENOMEM;

	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
	adata->keylen_pad = split_key_pad_len(adata->algtype &
					      OP_ALG_ALGSEL_MASK);

#ifdef DEBUG
	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
		adata->keylen, adata->keylen_pad);
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
#endif

	if (adata->keylen_pad > max_keylen)
		return -EINVAL;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
		goto out_free;
	}

	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
			 OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	append_fifo_store(desc, dma_addr_out, split_key_len,
	append_fifo_store(desc, dma_addr_out, adata->keylen,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

#ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
		       split_key_pad_len, 1);
		       adata->keylen_pad, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
			 DMA_FROM_DEVICE);
out_unmap_in:
	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);

@@ -12,6 +12,6 @@ struct split_key_result {

void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);

int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
		  int split_key_pad_len, const u8 *key_in, u32 keylen,
		  u32 alg_op);
int gen_split_key(struct device *jrdev, u8 *key_out,
		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
		  int max_keylen);

@@ -7,7 +7,11 @@

#include "regs.h"

struct sec4_sg_entry;
struct sec4_sg_entry {
	u64 ptr;
	u32 len;
	u32 bpid_offset;
};

/*
 * convert single dma address to h/w link table format

@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {

@@ -21,6 +21,12 @@

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
			bitmap_set(ccp->lsbmap, start, count);

			mutex_unlock(&ccp->sb_mutex);
			return start * LSB_ITEM_SIZE;
			return start;
		}

		ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
	}
}

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	int lsbno = start / LSB_SIZE;

	if (!start)
		return;

	if (cmd_q->lsb == lsbno) {
	if (cmd_q->lsb == start) {
		/* An entry from the private LSB */
		bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
		bitmap_clear(cmd_q->lsbmap, start, count);
	} else {
		/* From the shared LSBs */
		struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
	CCP5_CMD_PROT(&desc) = 0;

	function.raw = 0;
	CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
	CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
	CCP5_CMD_FUNCTION(&desc) = function.raw;

	CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

	/* Key (Exponent) is in external memory */
	CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
	CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
	/* Exponent is in LSB memory */
	CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
	CCP5_CMD_KEY_HI(&desc) = 0;
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;

	return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
		goto e_pool;
	}

	/* Initialize the queue used to suspend */
	init_waitqueue_head(&ccp->suspend_queue);

	dev_dbg(dev, "Loading LSB map...\n");
	/* Copy the private LSB mask to the public registers */
	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
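
The thrust of the v5 changes above: ccp_lsb_alloc() now returns an LSB slot index rather than a byte offset, so the conversion to an engine address happens only where a descriptor is built. A hedged sketch of that arithmetic (LSB_ITEM_SIZE is the driver's per-slot byte size; the variable names are illustrative):

	/* Sketch: slot index from the allocator, byte address at point of use. */
	u32 slot = ccp_lsb_alloc(cmd_q, 1);	/* slot number, not bytes */
	u32 key_addr = slot * LSB_ITEM_SIZE;	/* cf. CCP5_CMD_KEY_LO above */
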
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
};

/* Human-readable error strings */
char *ccp_error_codes[] = {
static char *ccp_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
	/* Private LSB that is assigned to this queue, or -1 if none.
	 * Bitmap for my private LSB, unused otherwise
	 */
	unsigned int lsb;
	int lsb;
	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);

	/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
	struct ccp_mem key;
};

static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
 * word 7: upper 16 bits of key pointer; key memory type
 */
struct dword0 {
	__le32 soc:1;
	__le32 ioc:1;
	__le32 rsvd1:1;
	__le32 init:1;
	__le32 eom:1;		/* AES/SHA only */
	__le32 function:15;
	__le32 engine:4;
	__le32 prot:1;
	__le32 rsvd2:7;
	unsigned int soc:1;
	unsigned int ioc:1;
	unsigned int rsvd1:1;
	unsigned int init:1;
	unsigned int eom:1;	/* AES/SHA only */
	unsigned int function:15;
	unsigned int engine:4;
	unsigned int prot:1;
	unsigned int rsvd2:7;
};

struct dword3 {
	__le32 src_hi:16;
	__le32 src_mem:2;
	__le32 lsb_cxt_id:8;
	__le32 rsvd1:5;
	__le32 fixed:1;
	unsigned int src_hi:16;
	unsigned int src_mem:2;
	unsigned int lsb_cxt_id:8;
	unsigned int rsvd1:5;
	unsigned int fixed:1;
};

union dword4 {
@@ -567,18 +566,18 @@ union dword4 {

union dword5 {
	struct {
		__le32 dst_hi:16;
		__le32 dst_mem:2;
		__le32 rsvd1:13;
		__le32 fixed:1;
		unsigned int dst_hi:16;
		unsigned int dst_mem:2;
		unsigned int rsvd1:13;
		unsigned int fixed:1;
	} fields;
	__le32 sha_len_hi;
};

struct dword7 {
	__le32 key_hi:16;
	__le32 key_mem:2;
	__le32 rsvd1:14;
	unsigned int key_hi:16;
	unsigned int key_mem:2;
	unsigned int rsvd1:14;
};

struct ccp5_desc {

@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	select CRYPTO_SHA512
	select CRYPTO_AUTHENC
	---help---
	  The Chelsio Crypto Co-processor driver for T6 adapters.

File diff suppressed because it is too large
@@ -108,30 +108,24 @@
#define IPAD_DATA 0x36363636
#define OPAD_DATA 0x5c5c5c5c

#define TRANSHDR_SIZE(alignedkctx_len)\
	(sizeof(struct ulptx_idata) +\
	 sizeof(struct ulp_txpkt) +\
	 sizeof(struct fw_crypto_lookaside_wr) +\
	 sizeof(struct cpl_tx_sec_pdu) +\
	 (alignedkctx_len))
#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
	(TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
#define TRANSHDR_SIZE(kctx_len)\
	(sizeof(struct chcr_wr) +\
	 kctx_len)
#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
	(TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
	 sizeof(struct cpl_rx_phys_dsgl))
#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
	(TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
#define HASH_TRANSHDR_SIZE(kctx_len)\
	(TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)

#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
			sizeof(struct ulp_txpkt) + \
			sizeof(struct ulptx_idata))

#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \
#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst) \
	htonl( \
	       CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
	       CPL_TX_SEC_PDU_RXCHID_V((id)) | \
	       CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
	       CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
	       CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
	       CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
	       CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
	       CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))

#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
	       CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
	       CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))

#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \
#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size) \
	htonl( \
	       SCMD_SEQ_NO_CTRL_V(0) | \
	       SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
	       SCMD_AUTH_MODE_V((amode)) | \
	       SCMD_HMAC_CTRL_V((opad)) | \
	       SCMD_IV_SIZE_V((size)) | \
	       SCMD_NUM_IVS_V((nivs)))
	       SCMD_NUM_IVS_V(0))

#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
	SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
 * where they indicate the size of the integrity check value (ICV)
 */
enum {
	AES_CCM_ICV_4 = 4,
	AES_CCM_ICV_6 = 6,
	AES_CCM_ICV_8 = 8,
	AES_CCM_ICV_10 = 10,
	AES_CCM_ICV_12 = 12,
	AES_CCM_ICV_14 = 14,
	AES_CCM_ICV_16 = 16
	ICV_4 = 4,
	ICV_6 = 6,
	ICV_8 = 8,
	ICV_10 = 10,
	ICV_12 = 12,
	ICV_13 = 13,
	ICV_14 = 14,
	ICV_15 = 15,
	ICV_16 = 16
};

struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
	187, 22
};

static u32 aes_ks_subword(const u32 w)
static inline u32 aes_ks_subword(const u32 w)
{
	u8 bytes[4];

@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
	0x1B000000, 0x36000000, 0x6C000000
};

/* dec_key - OUTPUT - Reverse round key
 * key - INPUT - key
 * keylength - INPUT - length of the key in number of bits
 */
static inline void get_aes_decrypt_key(unsigned char *dec_key,
				       const unsigned char *key,
				       unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++ )
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while(i + nk < (nr + 1) * 4) {
		if(!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		}
		else if (nk == 8 && (i % 4 == 0))
			temp = aes_ks_subword(temp);
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if(j < 0)
			j += nk;
	}
}

#endif /* __CHCR_ALGO_H__ */
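
A hedged caller sketch for the helper removed above (buffer names are hypothetical; in the driver the output typically lands in a per-tfm rrkey field): keylength is given in bits, and the output receives the final round-key words in reverse order for the hardware decrypt path:

	/* Hypothetical usage: build the reverse round key for an AES-128 key. */
	u8 rrkey[AES_MAX_KEY_SIZE];

	get_aes_decrypt_key(rrkey, user_key, 128);	/* 128/192/256 bits */
	/* rrkey is then copied into the hardware key context for decryption */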

@@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EINVAL;
			error_status = -EBADMSG;
	}
	/* call completion callback with failure status */
	if (req) {
		if (!chcr_handle_resp(req, input, error_status))
			req->complete(req, error_status);
		else
			return -EINVAL;
		error_status = chcr_handle_resp(req, input, error_status);
		req->complete(req, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
@@ -52,13 +52,27 @@

#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
#define MAX_SALT 4

struct uld_ctx;

struct _key_ctx {
	__be32 ctx_hdr;
	u8 salt[MAX_SALT];
	__be64 reserverd;
	unsigned char key[0];
};

struct chcr_wr {
	struct fw_crypto_lookaside_wr wreq;
	struct ulp_txpkt ulptx;
	struct ulptx_idata sc_imm;
	struct cpl_tx_sec_pdu sec_cpl;
	struct _key_ctx key_ctx;
};

struct chcr_dev {
	/* Request submited to h/w and waiting for response. */
	spinlock_t lock_chcr_dev;
	struct crypto_queue pending_queue;
	struct uld_ctx *u_ctx;
	unsigned char tx_channel_id;
};

@@ -36,6 +36,14 @@
#ifndef __CHCR_CRYPTO_H__
#define __CHCR_CRYPTO_H__

#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16

#define CCM_B0_SIZE 16
#define CCM_AAD_FIELD_SIZE 2
#define T5_MAX_AAD_SIZE 512


/* Define following if h/w is not dropping the AAD and IV data before
 * giving the processed data
 */
@@ -63,22 +71,36 @@
#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1

#define CHCR_SCMD_CIPHER_MODE_NOP 0
#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
#define CHCR_SCMD_CIPHER_MODE_NOP 0
#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
#define CHCR_SCMD_CIPHER_MODE_AES_CCM 7

#define CHCR_SCMD_AUTH_MODE_NOP 0
#define CHCR_SCMD_AUTH_MODE_SHA1 1
#define CHCR_SCMD_AUTH_MODE_SHA224 2
#define CHCR_SCMD_AUTH_MODE_SHA256 3
#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
#define CHCR_SCMD_AUTH_MODE_CBCMAC 9
#define CHCR_SCMD_AUTH_MODE_CMAC 10

#define CHCR_SCMD_HMAC_CTRL_NOP 0
#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366 2
#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT 3
#define CHCR_SCMD_HMAC_CTRL_PL1 4
#define CHCR_SCMD_HMAC_CTRL_PL2 5
#define CHCR_SCMD_HMAC_CTRL_PL3 6
#define CHCR_SCMD_HMAC_CTRL_DIV2 7
#define VERIFY_HW 0
#define VERIFY_SW 1

#define CHCR_SCMD_IVGEN_CTRL_HW 0
#define CHCR_SCMD_IVGEN_CTRL_SW 1
@@ -106,39 +128,74 @@
#define IV_IMMEDIATE 1
#define IV_DSGL 2

#define AEAD_H_SIZE 16

#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)

#define MAX_SALT 4
#define MAX_SCRATCH_PAD_SIZE 32

#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128

/* Aligned to 128 bit boundary */
struct _key_ctx {
	__be32 ctx_hdr;
	u8 salt[MAX_SALT];
	__be64 reserverd;
	unsigned char key[0];
};

struct ablk_ctx {
	u8 enc;
	unsigned int processed_len;
	__be32 key_ctx_hdr;
	unsigned int enckey_len;
	unsigned int dst_nents;
	struct scatterlist iv_sg;
	u8 key[CHCR_AES_MAX_KEY_LEN];
	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
	unsigned char ciph_mode;
	u8 rrkey[AES_MAX_KEY_SIZE];
};
struct chcr_aead_reqctx {
	struct sk_buff *skb;
	short int dst_nents;
	u16 verify;
	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
};

struct chcr_gcm_ctx {
	u8 ghash_h[AEAD_H_SIZE];
};

struct chcr_authenc_ctx {
	u8 dec_rrkey[AES_MAX_KEY_SIZE];
	u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
	unsigned char auth_mode;
};

struct __aead_ctx {
	struct chcr_gcm_ctx gcm[0];
	struct chcr_authenc_ctx authenc[0];
};



struct chcr_aead_ctx {
	__be32 key_ctx_hdr;
	unsigned int enckey_len;
	struct crypto_skcipher *null;
	u8 salt[MAX_SALT];
	u8 key[CHCR_AES_MAX_KEY_LEN];
	u16 hmac_ctrl;
	u16 mayverify;
	struct __aead_ctx ctx[0];
};



struct hmac_ctx {
	struct shash_desc *desc;
	struct crypto_shash *base_hash;
	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
	u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
@@ -146,6 +203,7 @@ struct hmac_ctx {
struct __crypto_ctx {
	struct hmac_ctx hmacctx[0];
	struct ablk_ctx ablkctx[0];
	struct chcr_aead_ctx aeadctx[0];
};

struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {

struct chcr_ahash_req_ctx {
	u32 result;
	char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
	u8 bfr_len;
	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
	u8 *reqbfr;
	u8 *skbfr;
	u8 reqlen;
	/* DMA the partial hash in it */
	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
	u64 data_len;  /* Data len till time */
	void *dummy_payload_ptr;
	/* SKB which is being sent to the hardware for processing */
	struct sk_buff *skb;
};

struct chcr_blkcipher_req_ctx {
	struct sk_buff *skb;
	unsigned int dst_nents;
	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};

struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
};

struct chcr_req_ctx {
	union {
		struct ahash_request *ahash_req;
		struct aead_request *aead_req;
		struct ablkcipher_request *ablk_req;
	} req;
	union {
		struct chcr_ahash_req_ctx *ahash_ctx;
		struct chcr_aead_reqctx *reqctx;
		struct chcr_blkcipher_req_ctx *ablk_ctx;
	} ctx;
};
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
					struct chcr_context *ctx,
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
					unsigned short qid,
					int size,
					unsigned short op_type);

static int chcr_aead_op(struct aead_request *req_base,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn);
static inline int get_aead_subtype(struct crypto_aead *aead);

#endif /* __CHCR_CRYPTO_H__ */

@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
	if (!dma->padding_pool)
		return -ENOMEM;

	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
	if (!dma->iv_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;

@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_DUMMY				0
#define CESA_TDMA_DATA				1
#define CESA_TDMA_OP				2
#define CESA_TDMA_IV				3
#define CESA_TDMA_RESULT			3

/**
 * struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
	struct dma_pool *op_pool;
	struct dma_pool *cache_pool;
	struct dma_pool *padding_pool;
	struct dma_pool *iv_pool;
};

/**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
	memset(chain, 0, sizeof(*chain));
}

int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			  u32 size, u32 flags, gfp_t gfp_flags);

struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,

@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
		memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(ablkreq->info,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,

	/* Add output data for IV */
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 * Result is already in the correct endianess when the SA is
		 * used
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;
		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
				CESA_SA_DESC_CFG_LAST_FRAG,
				CESA_SA_DESC_CFG_FRAG_MSK);

	ret = mv_cesa_dma_add_result_op(chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		return ERR_PTR(-ENOMEM);
	return op;
}

@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
		goto err_free_tdma;
	}

	if (op) {
	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
				       CESA_TDMA_BREAK_CHAIN);
	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	return 0;

@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
		if (type == CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));
		else if (type == CESA_TDMA_IV)
			dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
				      le32_to_cpu(tdma->dst));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
	return new_tdma;
}

int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
			  u32 size, u32 flags, gfp_t gfp_flags)
{

	struct mv_cesa_tdma_desc *tdma;
	u8 *iv;
	dma_addr_t dma_handle;
	struct mv_cesa_tdma_desc *tdma, *op_desc;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
	if (!iv)
		return -ENOMEM;
	/* We re-use an existing op_desc object to retrieve the context
	 * and result instead of allocating a new one.
	 * There is at least one object of this type in a CESA crypto
	 * req, just pick the first one in the chain.
	 */
	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

		if (type == CESA_TDMA_OP)
			break;
	}

	if (!op_desc)
		return -EIO;

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = cpu_to_le32(dma_handle);
	tdma->data = iv;
	tdma->dst = op_desc->src;
	tdma->op = op_desc->op;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_IV;
	tdma->flags = flags | CESA_TDMA_RESULT;
	return 0;
}

@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

@@ -1163,7 +1163,6 @@ err_irq:
err_thread:
	kthread_stop(cp->queue_th);
err:
	kfree(cp);
	cpg = NULL;
	return ret;
}
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
		clk_put(cp->clk);
	}

	kfree(cp);
	cpg = NULL;
	return 0;
}

@@ -32,7 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/types.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Decidated Mode.\n");
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
	if (v_lo & TALITOS_CCPSR_LO_MDTE)
		dev_err(dev, "master data transfer error\n");
	if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
		dev_err(dev, is_sec1 ? "pointeur not complete error\n"
		dev_err(dev, is_sec1 ? "pointer not complete error\n"
			     : "s/g data length zero error\n");
	if (v_lo & TALITOS_CCPSR_LO_FPZ)
		dev_err(dev, is_sec1 ? "parity error\n"

@@ -10,10 +10,12 @@ endif
quiet_cmd_perl = PERL $@
      cmd_perl = $(PERL) $(<) $(TARGET) > $(@)

$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
	$(call cmd,perl)

$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
	$(call cmd,perl)
targets += aesp8-ppc.S ghashp8-ppc.S

.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
	$(call if_changed,perl)

$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
	$(call if_changed,perl)

clean-files := aesp8-ppc.S ghashp8-ppc.S