Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...
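One of the API changes called out above is the arc4 conversion from a crypto_cipher to a plain library helper. As a rough illustration only (a minimal sketch assuming the <crypto/arc4.h> interface that accompanies this series; the example function and its error handling are hypothetical and not part of this merge):

/*
 * Sketch of a caller of the arc4 library helper: arc4_setkey() programs the
 * S-box, arc4_crypt() XORs the keystream over the buffer (encrypt and
 * decrypt are the same operation for a stream cipher).
 */
#include <crypto/arc4.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_arc4_crypt(const u8 *key, unsigned int keylen,
			      u8 *dst, const u8 *src, unsigned int len)
{
	struct arc4_ctx ctx;
	int err;

	err = arc4_setkey(&ctx, key, keylen);	/* key may be 1..256 bytes */
	if (err)
		return err;

	arc4_crypt(&ctx, dst, src, len);
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leave key schedule around */
	return 0;
}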
drivers/crypto/Kconfig:
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA
	  To compile this driver as a module, choose M here: the module
	  will be called atmel-sha.

config CRYPTO_DEV_ATMEL_I2C
	tristate

config CRYPTO_DEV_ATMEL_ECC
	tristate "Support for Microchip / Atmel ECC hw accelerator"
	depends on ARCH_AT91 || COMPILE_TEST
	depends on I2C
	select CRYPTO_DEV_ATMEL_I2C
	select CRYPTO_ECDH
	select CRC16
	help
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC
	  To compile this driver as a module, choose M here: the module
	  will be called atmel-ecc.

config CRYPTO_DEV_ATMEL_SHA204A
	tristate "Support for Microchip / Atmel SHA accelerator and RNG"
	depends on I2C
	select CRYPTO_DEV_ATMEL_I2C
	select HW_RANDOM
	select CRC16
	help
	  Microchip / Atmel SHA accelerator and RNG.
	  Select this if you want to use the Microchip / Atmel SHA204A
	  module as a random number generator. (Other functions of the
	  chip are currently not exposed by this driver)

	  To compile this driver as a module, choose M here: the module
	  will be called atmel-sha204a.

config CRYPTO_DEV_CCP
	bool "Support for AMD Secure Processor"
	depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM

drivers/crypto/Makefile:
@@ -2,7 +2,9 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
drivers/crypto/amcc/crypto4xx_alg.c:
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
}

static inline int crypto4xx_crypt(struct skcipher_request *req,
				  const unsigned int ivlen, bool decrypt)
				  const unsigned int ivlen, bool decrypt,
				  bool check_blocksize)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
	__le32 iv[AES_IV_SIZE];

	if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
		return -EINVAL;

	if (ivlen)
		crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);

@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
				  ctx->sa_len, 0, NULL);
}

int crypto4xx_encrypt_noiv(struct skcipher_request *req)
int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, 0, false);
	return crypto4xx_crypt(req, 0, false, true);
}

int crypto4xx_encrypt_iv(struct skcipher_request *req)
int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, AES_IV_SIZE, false);
	return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
}

int crypto4xx_decrypt_noiv(struct skcipher_request *req)
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, 0, true);
	return crypto4xx_crypt(req, 0, true, true);
}

int crypto4xx_decrypt_iv(struct skcipher_request *req)
int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, AES_IV_SIZE, true);
	return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}

int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}

int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
	return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}

/**
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
		return ret;
	}

	return encrypt ? crypto4xx_encrypt_iv(req)
		       : crypto4xx_decrypt_iv(req);
	return encrypt ? crypto4xx_encrypt_iv_stream(req)
		       : crypto4xx_decrypt_iv_stream(req);
}

static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
drivers/crypto/amcc/crypto4xx_core.c:
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.encrypt = crypto4xx_encrypt_iv_block,
		.decrypt = crypto4xx_decrypt_iv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cfb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv,
		.decrypt = crypto4xx_decrypt_noiv,
		.encrypt = crypto4xx_encrypt_noiv_block,
		.decrypt = crypto4xx_decrypt_noiv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ofb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

drivers/crypto/amcc/crypto4xx.h:
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
int crypto4xx_encrypt_iv(struct skcipher_request *req);
int crypto4xx_decrypt_iv(struct skcipher_request *req);
int crypto4xx_encrypt_noiv(struct skcipher_request *req);
int crypto4xx_decrypt_noiv(struct skcipher_request *req);
int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
@@ -6,8 +6,6 @@
|
||||
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitrev.h>
|
||||
#include <linux/crc16.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
@@ -23,41 +21,10 @@
|
||||
#include <crypto/internal/kpp.h>
|
||||
#include <crypto/ecdh.h>
|
||||
#include <crypto/kpp.h>
|
||||
#include "atmel-ecc.h"
|
||||
|
||||
/* Used for binding tfm objects to i2c clients. */
|
||||
struct atmel_ecc_driver_data {
|
||||
struct list_head i2c_client_list;
|
||||
spinlock_t i2c_list_lock;
|
||||
} ____cacheline_aligned;
|
||||
#include "atmel-i2c.h"
|
||||
|
||||
static struct atmel_ecc_driver_data driver_data;
|
||||
|
||||
/**
|
||||
* atmel_ecc_i2c_client_priv - i2c_client private data
|
||||
* @client : pointer to i2c client device
|
||||
* @i2c_client_list_node: part of i2c_client_list
|
||||
* @lock : lock for sending i2c commands
|
||||
* @wake_token : wake token array of zeros
|
||||
* @wake_token_sz : size in bytes of the wake_token
|
||||
* @tfm_count : number of active crypto transformations on i2c client
|
||||
*
|
||||
* Reads and writes from/to the i2c client are sequential. The first byte
|
||||
* transmitted to the device is treated as the byte size. Any attempt to send
|
||||
* more than this number of bytes will cause the device to not ACK those bytes.
|
||||
* After the host writes a single command byte to the input buffer, reads are
|
||||
* prohibited until after the device completes command execution. Use a mutex
|
||||
* when sending i2c commands.
|
||||
*/
|
||||
struct atmel_ecc_i2c_client_priv {
|
||||
struct i2c_client *client;
|
||||
struct list_head i2c_client_list_node;
|
||||
struct mutex lock;
|
||||
u8 wake_token[WAKE_TOKEN_MAX_SIZE];
|
||||
size_t wake_token_sz;
|
||||
atomic_t tfm_count ____cacheline_aligned;
|
||||
};
|
||||
|
||||
/**
|
||||
* atmel_ecdh_ctx - transformation context
|
||||
* @client : pointer to i2c client device
|
||||
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx {
|
||||
bool do_fallback;
|
||||
};
|
||||
|
||||
/**
|
||||
* atmel_ecc_work_data - data structure representing the work
|
||||
* @ctx : transformation context.
|
||||
* @cbk : pointer to a callback function to be invoked upon completion of this
|
||||
* request. This has the form:
|
||||
* callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status)
|
||||
* where:
|
||||
* @work_data: data structure representing the work
|
||||
* @areq : optional pointer to an argument passed with the original
|
||||
* request.
|
||||
* @status : status returned from the i2c client device or i2c error.
|
||||
* @areq: optional pointer to a user argument for use at callback time.
|
||||
* @work: describes the task to be executed.
|
||||
* @cmd : structure used for communicating with the device.
|
||||
*/
|
||||
struct atmel_ecc_work_data {
|
||||
struct atmel_ecdh_ctx *ctx;
|
||||
void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
|
||||
int status);
|
||||
void *areq;
|
||||
struct work_struct work;
|
||||
struct atmel_ecc_cmd cmd;
|
||||
};
|
||||
|
||||
static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
|
||||
{
|
||||
return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
|
||||
}
|
||||
|
||||
/**
|
||||
* atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
|
||||
* CRC16 verification of the count, opcode, param1, param2 and data bytes.
|
||||
* The checksum is saved in little-endian format in the least significant
|
||||
* two bytes of the command. CRC polynomial is 0x8005 and the initial register
|
||||
* value should be zero.
|
||||
*
|
||||
* @cmd : structure used for communicating with the device.
|
||||
*/
|
||||
static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
|
||||
{
|
||||
u8 *data = &cmd->count;
|
||||
size_t len = cmd->count - CRC_SIZE;
|
||||
u16 *crc16 = (u16 *)(data + len);
|
||||
|
||||
*crc16 = atmel_ecc_crc16(0, data, len);
|
||||
}
|
||||
|
||||
static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
|
||||
{
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->opcode = OPCODE_READ;
|
||||
/*
|
||||
* Read the word from Configuration zone that contains the lock bytes
|
||||
* (UserExtra, Selector, LockValue, LockConfig).
|
||||
*/
|
||||
cmd->param1 = CONFIG_ZONE;
|
||||
cmd->param2 = DEVICE_LOCK_ADDR;
|
||||
cmd->count = READ_COUNT;
|
||||
|
||||
atmel_ecc_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_READ;
|
||||
cmd->rxsize = READ_RSP_SIZE;
|
||||
}
|
||||
|
||||
static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
|
||||
{
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->count = GENKEY_COUNT;
|
||||
cmd->opcode = OPCODE_GENKEY;
|
||||
cmd->param1 = GENKEY_MODE_PRIVATE;
|
||||
/* a random private key will be generated and stored in slot keyID */
|
||||
cmd->param2 = cpu_to_le16(keyid);
|
||||
|
||||
atmel_ecc_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_GENKEY;
|
||||
cmd->rxsize = GENKEY_RSP_SIZE;
|
||||
}
|
||||
|
||||
static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
|
||||
struct scatterlist *pubkey)
|
||||
{
|
||||
size_t copied;
|
||||
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->count = ECDH_COUNT;
|
||||
cmd->opcode = OPCODE_ECDH;
|
||||
cmd->param1 = ECDH_PREFIX_MODE;
|
||||
/* private key slot */
|
||||
cmd->param2 = cpu_to_le16(DATA_SLOT_2);
|
||||
|
||||
/*
|
||||
* The device only supports NIST P256 ECC keys. The public key size will
|
||||
* always be the same. Use a macro for the key size to avoid unnecessary
|
||||
* computations.
|
||||
*/
|
||||
copied = sg_copy_to_buffer(pubkey,
|
||||
sg_nents_for_len(pubkey,
|
||||
ATMEL_ECC_PUBKEY_SIZE),
|
||||
cmd->data, ATMEL_ECC_PUBKEY_SIZE);
|
||||
if (copied != ATMEL_ECC_PUBKEY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
atmel_ecc_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_ECDH;
|
||||
cmd->rxsize = ECDH_RSP_SIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* After wake and after execution of a command, there will be error, status, or
|
||||
* result bytes in the device's output register that can be retrieved by the
|
||||
* system. When the length of that group is four bytes, the codes returned are
|
||||
* detailed in error_list.
|
||||
*/
|
||||
static int atmel_ecc_status(struct device *dev, u8 *status)
|
||||
{
|
||||
size_t err_list_len = ARRAY_SIZE(error_list);
|
||||
int i;
|
||||
u8 err_id = status[1];
|
||||
|
||||
if (*status != STATUS_SIZE)
|
||||
return 0;
|
||||
|
||||
if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < err_list_len; i++)
|
||||
if (error_list[i].value == err_id)
|
||||
break;
|
||||
|
||||
/* if err_id is not in the error_list then ignore it */
|
||||
if (i != err_list_len) {
|
||||
dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
|
||||
return err_id;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int atmel_ecc_wakeup(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
u8 status[STATUS_RSP_SIZE];
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* The device ignores any levels or transitions on the SCL pin when the
|
||||
* device is idle, asleep or during waking up. Don't check for error
|
||||
* when waking up the device.
|
||||
*/
|
||||
i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
|
||||
|
||||
/*
|
||||
* Wait to wake the device. Typical execution times for ecdh and genkey
|
||||
* are around tens of milliseconds. Delta is chosen to 50 microseconds.
|
||||
*/
|
||||
usleep_range(TWHI_MIN, TWHI_MAX);
|
||||
|
||||
ret = i2c_master_recv(client, status, STATUS_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return atmel_ecc_status(&client->dev, status);
|
||||
}
|
||||
|
||||
static int atmel_ecc_sleep(struct i2c_client *client)
|
||||
{
|
||||
u8 sleep = SLEEP_TOKEN;
|
||||
|
||||
return i2c_master_send(client, &sleep, 1);
|
||||
}
|
||||
|
||||
static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
|
||||
static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
|
||||
int status)
|
||||
{
|
||||
struct kpp_request *req = areq;
|
||||
struct atmel_ecdh_ctx *ctx = work_data->ctx;
|
||||
struct atmel_ecc_cmd *cmd = &work_data->cmd;
|
||||
struct atmel_i2c_cmd *cmd = &work_data->cmd;
|
||||
size_t copied, n_sz;
|
||||
|
||||
if (status)
|
||||
@@ -282,82 +73,6 @@ free_work_data:
|
||||
kpp_request_complete(req, status);
|
||||
}
|
||||
|
||||
/*
|
||||
* atmel_ecc_send_receive() - send a command to the device and receive its
|
||||
* response.
|
||||
* @client: i2c client device
|
||||
* @cmd : structure used to communicate with the device
|
||||
*
|
||||
* After the device receives a Wake token, a watchdog counter starts within the
|
||||
* device. After the watchdog timer expires, the device enters sleep mode
|
||||
* regardless of whether some I/O transmission or command execution is in
|
||||
* progress. If a command is attempted when insufficient time remains prior to
|
||||
* watchdog timer execution, the device will return the watchdog timeout error
|
||||
* code without attempting to execute the command. There is no way to reset the
|
||||
* counter other than to put the device into sleep or idle mode and then
|
||||
* wake it up again.
|
||||
*/
|
||||
static int atmel_ecc_send_receive(struct i2c_client *client,
|
||||
struct atmel_ecc_cmd *cmd)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&i2c_priv->lock);
|
||||
|
||||
ret = atmel_ecc_wakeup(client);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/* send the command */
|
||||
ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
/* delay the appropriate amount of time for command to execute */
|
||||
msleep(cmd->msecs);
|
||||
|
||||
/* receive the response */
|
||||
ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
/* put the device into low-power mode */
|
||||
ret = atmel_ecc_sleep(client);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
mutex_unlock(&i2c_priv->lock);
|
||||
return atmel_ecc_status(&client->dev, cmd->data);
|
||||
err:
|
||||
mutex_unlock(&i2c_priv->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void atmel_ecc_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct atmel_ecc_work_data *work_data =
|
||||
container_of(work, struct atmel_ecc_work_data, work);
|
||||
struct atmel_ecc_cmd *cmd = &work_data->cmd;
|
||||
struct i2c_client *client = work_data->ctx->client;
|
||||
int status;
|
||||
|
||||
status = atmel_ecc_send_receive(client, cmd);
|
||||
work_data->cbk(work_data, work_data->areq, status);
|
||||
}
|
||||
|
||||
static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
|
||||
void (*cbk)(struct atmel_ecc_work_data *work_data,
|
||||
void *areq, int status),
|
||||
void *areq)
|
||||
{
|
||||
work_data->cbk = (void *)cbk;
|
||||
work_data->areq = areq;
|
||||
|
||||
INIT_WORK(&work_data->work, atmel_ecc_work_handler);
|
||||
schedule_work(&work_data->work);
|
||||
}
|
||||
|
||||
static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
|
||||
{
|
||||
if (curve_id == ECC_CURVE_NIST_P256)
|
||||
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
|
||||
unsigned int len)
|
||||
{
|
||||
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
|
||||
struct atmel_ecc_cmd *cmd;
|
||||
struct atmel_i2c_cmd *cmd;
|
||||
void *public_key;
|
||||
struct ecdh params;
|
||||
int ret = -ENOMEM;
|
||||
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
|
||||
ctx->do_fallback = false;
|
||||
ctx->curve_id = params.curve_id;
|
||||
|
||||
atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
|
||||
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
|
||||
|
||||
ret = atmel_ecc_send_receive(ctx->client, cmd);
|
||||
ret = atmel_i2c_send_receive(ctx->client, cmd);
|
||||
if (ret)
|
||||
goto free_public_key;
|
||||
|
||||
@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
|
||||
return crypto_kpp_generate_public_key(req);
|
||||
}
|
||||
|
||||
if (!ctx->public_key)
|
||||
return -EINVAL;
|
||||
|
||||
/* might want less than we've got */
|
||||
nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
|
||||
|
||||
@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
|
||||
{
|
||||
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
|
||||
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
|
||||
struct atmel_ecc_work_data *work_data;
|
||||
struct atmel_i2c_work_data *work_data;
|
||||
gfp_t gfp;
|
||||
int ret;
|
||||
|
||||
@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
|
||||
return -ENOMEM;
|
||||
|
||||
work_data->ctx = ctx;
|
||||
work_data->client = ctx->client;
|
||||
|
||||
ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
|
||||
ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src);
|
||||
if (ret)
|
||||
goto free_work_data;
|
||||
|
||||
atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
|
||||
atmel_i2c_enqueue(work_data, atmel_ecdh_done, req);
|
||||
|
||||
return -EINPROGRESS;
|
||||
|
||||
@@ -498,7 +217,7 @@ free_work_data:
|
||||
|
||||
static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
|
||||
struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
|
||||
struct i2c_client *client = ERR_PTR(-ENODEV);
|
||||
int min_tfm_cnt = INT_MAX;
|
||||
int tfm_cnt;
|
||||
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
|
||||
|
||||
static void atmel_ecc_i2c_client_free(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
|
||||
atomic_dec(&i2c_priv->tfm_count);
|
||||
}
|
||||
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = {
|
||||
},
|
||||
};
|
||||
|
||||
static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
|
||||
{
|
||||
u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
|
||||
|
||||
/* return the size of the wake_token in bytes */
|
||||
return DIV_ROUND_UP(no_of_bits, 8);
|
||||
}
|
||||
|
||||
static int device_sanity_check(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_ecc_cmd *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
atmel_ecc_init_read_cmd(cmd);
|
||||
|
||||
ret = atmel_ecc_send_receive(client, cmd);
|
||||
if (ret)
|
||||
goto free_cmd;
|
||||
|
||||
/*
|
||||
* It is vital that the Configuration, Data and OTP zones be locked
|
||||
* prior to release into the field of the system containing the device.
|
||||
* Failure to lock these zones may permit modification of any secret
|
||||
* keys and may lead to other security problems.
|
||||
*/
|
||||
if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
|
||||
dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
|
||||
ret = -ENOTSUPP;
|
||||
}
|
||||
|
||||
/* fall through */
|
||||
free_cmd:
|
||||
kfree(cmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int atmel_ecc_probe(struct i2c_client *client,
|
||||
const struct i2c_device_id *id)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv;
|
||||
struct device *dev = &client->dev;
|
||||
struct atmel_i2c_client_priv *i2c_priv;
|
||||
int ret;
|
||||
u32 bus_clk_rate;
|
||||
|
||||
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
|
||||
dev_err(dev, "I2C_FUNC_I2C not supported\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ret = of_property_read_u32(client->adapter->dev.of_node,
|
||||
"clock-frequency", &bus_clk_rate);
|
||||
if (ret) {
|
||||
dev_err(dev, "of: failed to read clock-frequency property\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (bus_clk_rate > 1000000L) {
|
||||
dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
|
||||
bus_clk_rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
|
||||
if (!i2c_priv)
|
||||
return -ENOMEM;
|
||||
|
||||
i2c_priv->client = client;
|
||||
mutex_init(&i2c_priv->lock);
|
||||
|
||||
/*
|
||||
* WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
|
||||
* 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
|
||||
* will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
|
||||
*/
|
||||
i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
|
||||
|
||||
memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
|
||||
|
||||
atomic_set(&i2c_priv->tfm_count, 0);
|
||||
|
||||
i2c_set_clientdata(client, i2c_priv);
|
||||
|
||||
ret = device_sanity_check(client);
|
||||
ret = atmel_i2c_probe(client, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i2c_priv = i2c_get_clientdata(client);
|
||||
|
||||
spin_lock(&driver_data.i2c_list_lock);
|
||||
list_add_tail(&i2c_priv->i2c_client_list_node,
|
||||
&driver_data.i2c_client_list);
|
||||
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client,
|
||||
list_del(&i2c_priv->i2c_client_list_node);
|
||||
spin_unlock(&driver_data.i2c_list_lock);
|
||||
|
||||
dev_err(dev, "%s alg registration failed\n",
|
||||
dev_err(&client->dev, "%s alg registration failed\n",
|
||||
atmel_ecdh.base.cra_driver_name);
|
||||
} else {
|
||||
dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
|
||||
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
|
||||
|
||||
static int atmel_ecc_remove(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
|
||||
/* Return EBUSY if i2c client already allocated. */
|
||||
if (atomic_read(&i2c_priv->tfm_count)) {
|
||||
|
@@ -1,116 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2017, Microchip Technology Inc.
|
||||
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
*/
|
||||
|
||||
#ifndef __ATMEL_ECC_H__
|
||||
#define __ATMEL_ECC_H__
|
||||
|
||||
#define ATMEL_ECC_PRIORITY 300
|
||||
|
||||
#define COMMAND 0x03 /* packet function */
|
||||
#define SLEEP_TOKEN 0x01
|
||||
#define WAKE_TOKEN_MAX_SIZE 8
|
||||
|
||||
/* Definitions of Data and Command sizes */
|
||||
#define WORD_ADDR_SIZE 1
|
||||
#define COUNT_SIZE 1
|
||||
#define CRC_SIZE 2
|
||||
#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
|
||||
|
||||
/* size in bytes of the n prime */
|
||||
#define ATMEL_ECC_NIST_P256_N_SIZE 32
|
||||
#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
|
||||
|
||||
#define STATUS_RSP_SIZE 4
|
||||
#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
|
||||
#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
|
||||
CMD_OVERHEAD_SIZE)
|
||||
#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
|
||||
#define MAX_RSP_SIZE GENKEY_RSP_SIZE
|
||||
|
||||
/**
|
||||
* atmel_ecc_cmd - structure used for communicating with the device.
|
||||
* @word_addr: indicates the function of the packet sent to the device. This
|
||||
* byte should have a value of COMMAND for normal operation.
|
||||
* @count : number of bytes to be transferred to (or from) the device.
|
||||
* @opcode : the command code.
|
||||
* @param1 : the first parameter; always present.
|
||||
* @param2 : the second parameter; always present.
|
||||
* @data : optional remaining input data. Includes a 2-byte CRC.
|
||||
* @rxsize : size of the data received from i2c client.
|
||||
* @msecs : command execution time in milliseconds
|
||||
*/
|
||||
struct atmel_ecc_cmd {
|
||||
u8 word_addr;
|
||||
u8 count;
|
||||
u8 opcode;
|
||||
u8 param1;
|
||||
u16 param2;
|
||||
u8 data[MAX_RSP_SIZE];
|
||||
u8 msecs;
|
||||
u16 rxsize;
|
||||
} __packed;
|
||||
|
||||
/* Status/Error codes */
|
||||
#define STATUS_SIZE 0x04
|
||||
#define STATUS_NOERR 0x00
|
||||
#define STATUS_WAKE_SUCCESSFUL 0x11
|
||||
|
||||
static const struct {
|
||||
u8 value;
|
||||
const char *error_text;
|
||||
} error_list[] = {
|
||||
{ 0x01, "CheckMac or Verify miscompare" },
|
||||
{ 0x03, "Parse Error" },
|
||||
{ 0x05, "ECC Fault" },
|
||||
{ 0x0F, "Execution Error" },
|
||||
{ 0xEE, "Watchdog about to expire" },
|
||||
{ 0xFF, "CRC or other communication error" },
|
||||
};
|
||||
|
||||
/* Definitions for eeprom organization */
|
||||
#define CONFIG_ZONE 0
|
||||
|
||||
/* Definitions for Indexes common to all commands */
|
||||
#define RSP_DATA_IDX 1 /* buffer index of data in response */
|
||||
#define DATA_SLOT_2 2 /* used for ECDH private key */
|
||||
|
||||
/* Definitions for the device lock state */
|
||||
#define DEVICE_LOCK_ADDR 0x15
|
||||
#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
|
||||
#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
|
||||
|
||||
/*
|
||||
* Wake High delay to data communication (microseconds). SDA should be stable
|
||||
* high for this entire duration.
|
||||
*/
|
||||
#define TWHI_MIN 1500
|
||||
#define TWHI_MAX 1550
|
||||
|
||||
/* Wake Low duration */
|
||||
#define TWLO_USEC 60
|
||||
|
||||
/* Command execution time (milliseconds) */
|
||||
#define MAX_EXEC_TIME_ECDH 58
|
||||
#define MAX_EXEC_TIME_GENKEY 115
|
||||
#define MAX_EXEC_TIME_READ 1
|
||||
|
||||
/* Command opcode */
|
||||
#define OPCODE_ECDH 0x43
|
||||
#define OPCODE_GENKEY 0x40
|
||||
#define OPCODE_READ 0x02
|
||||
|
||||
/* Definitions for the READ Command */
|
||||
#define READ_COUNT 7
|
||||
|
||||
/* Definitions for the GenKey Command */
|
||||
#define GENKEY_COUNT 7
|
||||
#define GENKEY_MODE_PRIVATE 0x04
|
||||
|
||||
/* Definitions for the ECDH Command */
|
||||
#define ECDH_COUNT 71
|
||||
#define ECDH_PREFIX_MODE 0x00
|
||||
|
||||
#endif /* __ATMEL_ECC_H__ */
|
drivers/crypto/atmel-i2c.c (new file, 364 lines):
@@ -0,0 +1,364 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Microchip / Atmel ECC (I2C) driver.
|
||||
*
|
||||
* Copyright (c) 2017, Microchip Technology Inc.
|
||||
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitrev.h>
|
||||
#include <linux/crc16.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include "atmel-i2c.h"
|
||||
|
||||
/**
|
||||
* atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
|
||||
* CRC16 verification of the count, opcode, param1, param2 and data bytes.
|
||||
* The checksum is saved in little-endian format in the least significant
|
||||
* two bytes of the command. CRC polynomial is 0x8005 and the initial register
|
||||
* value should be zero.
|
||||
*
|
||||
* @cmd : structure used for communicating with the device.
|
||||
*/
|
||||
static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd)
|
||||
{
|
||||
u8 *data = &cmd->count;
|
||||
size_t len = cmd->count - CRC_SIZE;
|
||||
__le16 *__crc16 = (__le16 *)(data + len);
|
||||
|
||||
*__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len)));
|
||||
}
|
||||
|
||||
void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
|
||||
{
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->opcode = OPCODE_READ;
|
||||
/*
|
||||
* Read the word from Configuration zone that contains the lock bytes
|
||||
* (UserExtra, Selector, LockValue, LockConfig).
|
||||
*/
|
||||
cmd->param1 = CONFIG_ZONE;
|
||||
cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
|
||||
cmd->count = READ_COUNT;
|
||||
|
||||
atmel_i2c_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_READ;
|
||||
cmd->rxsize = READ_RSP_SIZE;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_init_read_cmd);
|
||||
|
||||
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd)
|
||||
{
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->opcode = OPCODE_RANDOM;
|
||||
cmd->param1 = 0;
|
||||
cmd->param2 = 0;
|
||||
cmd->count = RANDOM_COUNT;
|
||||
|
||||
atmel_i2c_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_RANDOM;
|
||||
cmd->rxsize = RANDOM_RSP_SIZE;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_init_random_cmd);
|
||||
|
||||
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid)
|
||||
{
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->count = GENKEY_COUNT;
|
||||
cmd->opcode = OPCODE_GENKEY;
|
||||
cmd->param1 = GENKEY_MODE_PRIVATE;
|
||||
/* a random private key will be generated and stored in slot keyID */
|
||||
cmd->param2 = cpu_to_le16(keyid);
|
||||
|
||||
atmel_i2c_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_GENKEY;
|
||||
cmd->rxsize = GENKEY_RSP_SIZE;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd);
|
||||
|
||||
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
|
||||
struct scatterlist *pubkey)
|
||||
{
|
||||
size_t copied;
|
||||
|
||||
cmd->word_addr = COMMAND;
|
||||
cmd->count = ECDH_COUNT;
|
||||
cmd->opcode = OPCODE_ECDH;
|
||||
cmd->param1 = ECDH_PREFIX_MODE;
|
||||
/* private key slot */
|
||||
cmd->param2 = cpu_to_le16(DATA_SLOT_2);
|
||||
|
||||
/*
|
||||
* The device only supports NIST P256 ECC keys. The public key size will
|
||||
* always be the same. Use a macro for the key size to avoid unnecessary
|
||||
* computations.
|
||||
*/
|
||||
copied = sg_copy_to_buffer(pubkey,
|
||||
sg_nents_for_len(pubkey,
|
||||
ATMEL_ECC_PUBKEY_SIZE),
|
||||
cmd->data, ATMEL_ECC_PUBKEY_SIZE);
|
||||
if (copied != ATMEL_ECC_PUBKEY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
atmel_i2c_checksum(cmd);
|
||||
|
||||
cmd->msecs = MAX_EXEC_TIME_ECDH;
|
||||
cmd->rxsize = ECDH_RSP_SIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd);
|
||||
|
||||
/*
|
||||
* After wake and after execution of a command, there will be error, status, or
|
||||
* result bytes in the device's output register that can be retrieved by the
|
||||
* system. When the length of that group is four bytes, the codes returned are
|
||||
* detailed in error_list.
|
||||
*/
|
||||
static int atmel_i2c_status(struct device *dev, u8 *status)
|
||||
{
|
||||
size_t err_list_len = ARRAY_SIZE(error_list);
|
||||
int i;
|
||||
u8 err_id = status[1];
|
||||
|
||||
if (*status != STATUS_SIZE)
|
||||
return 0;
|
||||
|
||||
if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < err_list_len; i++)
|
||||
if (error_list[i].value == err_id)
|
||||
break;
|
||||
|
||||
/* if err_id is not in the error_list then ignore it */
|
||||
if (i != err_list_len) {
|
||||
dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
|
||||
return err_id;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int atmel_i2c_wakeup(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
u8 status[STATUS_RSP_SIZE];
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* The device ignores any levels or transitions on the SCL pin when the
|
||||
* device is idle, asleep or during waking up. Don't check for error
|
||||
* when waking up the device.
|
||||
*/
|
||||
i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
|
||||
|
||||
/*
|
||||
* Wait to wake the device. Typical execution times for ecdh and genkey
|
||||
* are around tens of milliseconds. Delta is chosen to 50 microseconds.
|
||||
*/
|
||||
usleep_range(TWHI_MIN, TWHI_MAX);
|
||||
|
||||
ret = i2c_master_recv(client, status, STATUS_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return atmel_i2c_status(&client->dev, status);
|
||||
}
|
||||
|
||||
static int atmel_i2c_sleep(struct i2c_client *client)
|
||||
{
|
||||
u8 sleep = SLEEP_TOKEN;
|
||||
|
||||
return i2c_master_send(client, &sleep, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* atmel_i2c_send_receive() - send a command to the device and receive its
|
||||
* response.
|
||||
* @client: i2c client device
|
||||
* @cmd : structure used to communicate with the device
|
||||
*
|
||||
* After the device receives a Wake token, a watchdog counter starts within the
|
||||
* device. After the watchdog timer expires, the device enters sleep mode
|
||||
* regardless of whether some I/O transmission or command execution is in
|
||||
* progress. If a command is attempted when insufficient time remains prior to
|
||||
* watchdog timer execution, the device will return the watchdog timeout error
|
||||
* code without attempting to execute the command. There is no way to reset the
|
||||
* counter other than to put the device into sleep or idle mode and then
|
||||
* wake it up again.
|
||||
*/
|
||||
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd)
|
||||
{
|
||||
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&i2c_priv->lock);
|
||||
|
||||
ret = atmel_i2c_wakeup(client);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/* send the command */
|
||||
ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
/* delay the appropriate amount of time for command to execute */
|
||||
msleep(cmd->msecs);
|
||||
|
||||
/* receive the response */
|
||||
ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
/* put the device into low-power mode */
|
||||
ret = atmel_i2c_sleep(client);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
mutex_unlock(&i2c_priv->lock);
|
||||
return atmel_i2c_status(&client->dev, cmd->data);
|
||||
err:
|
||||
mutex_unlock(&i2c_priv->lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_send_receive);
|
||||
|
||||
static void atmel_i2c_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct atmel_i2c_work_data *work_data =
|
||||
container_of(work, struct atmel_i2c_work_data, work);
|
||||
struct atmel_i2c_cmd *cmd = &work_data->cmd;
|
||||
struct i2c_client *client = work_data->client;
|
||||
int status;
|
||||
|
||||
status = atmel_i2c_send_receive(client, cmd);
|
||||
work_data->cbk(work_data, work_data->areq, status);
|
||||
}
|
||||
|
||||
void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
|
||||
void (*cbk)(struct atmel_i2c_work_data *work_data,
|
||||
void *areq, int status),
|
||||
void *areq)
|
||||
{
|
||||
work_data->cbk = (void *)cbk;
|
||||
work_data->areq = areq;
|
||||
|
||||
INIT_WORK(&work_data->work, atmel_i2c_work_handler);
|
||||
schedule_work(&work_data->work);
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_enqueue);
|
||||
|
||||
static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate)
|
||||
{
|
||||
u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
|
||||
|
||||
/* return the size of the wake_token in bytes */
|
||||
return DIV_ROUND_UP(no_of_bits, 8);
|
||||
}
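/*
 * Worked example for the conversion above (editorial illustration, not part
 * of the original source): the wake token must hold SDA low for TWLO_USEC
 * (60 us), and each all-zero byte sent at bus_clk_rate covers 8 bit-times.
 * At the 1 MHz maximum, 60 us * 1000000 / USEC_PER_SEC = 60 bit-times and
 * DIV_ROUND_UP(60, 8) = 8 bytes = WAKE_TOKEN_MAX_SIZE; at 100 kHz the same
 * formula gives 6 bit-times, i.e. a single zero byte is enough.
 */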
|
||||
|
||||
static int device_sanity_check(struct i2c_client *client)
|
||||
{
|
||||
struct atmel_i2c_cmd *cmd;
|
||||
int ret;
|
||||
|
||||
cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
atmel_i2c_init_read_cmd(cmd);
|
||||
|
||||
ret = atmel_i2c_send_receive(client, cmd);
|
||||
if (ret)
|
||||
goto free_cmd;
|
||||
|
||||
/*
|
||||
* It is vital that the Configuration, Data and OTP zones be locked
|
||||
* prior to release into the field of the system containing the device.
|
||||
* Failure to lock these zones may permit modification of any secret
|
||||
* keys and may lead to other security problems.
|
||||
*/
|
||||
if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
|
||||
dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
|
||||
ret = -ENOTSUPP;
|
||||
}
|
||||
|
||||
/* fall through */
|
||||
free_cmd:
|
||||
kfree(cmd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
|
||||
{
|
||||
struct atmel_i2c_client_priv *i2c_priv;
|
||||
struct device *dev = &client->dev;
|
||||
int ret;
|
||||
u32 bus_clk_rate;
|
||||
|
||||
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
|
||||
dev_err(dev, "I2C_FUNC_I2C not supported\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev);
|
||||
if (!bus_clk_rate) {
|
||||
ret = device_property_read_u32(&client->adapter->dev,
|
||||
"clock-frequency", &bus_clk_rate);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to read clock-frequency property\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (bus_clk_rate > 1000000L) {
|
||||
dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
|
||||
bus_clk_rate);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
|
||||
if (!i2c_priv)
|
||||
return -ENOMEM;
|
||||
|
||||
i2c_priv->client = client;
|
||||
mutex_init(&i2c_priv->lock);
|
||||
|
||||
/*
|
||||
* WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
|
||||
* 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz
|
||||
* will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
|
||||
*/
|
||||
i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate);
|
||||
|
||||
memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
|
||||
|
||||
atomic_set(&i2c_priv->tfm_count, 0);
|
||||
|
||||
i2c_set_clientdata(client, i2c_priv);
|
||||
|
||||
ret = device_sanity_check(client);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(atmel_i2c_probe);
|
||||
|
||||
MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
|
||||
MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
|
||||
MODULE_LICENSE("GPL v2");
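For reference, the checksum produced by atmel_i2c_checksum() above combines the kernel's reflected CRC-16 (polynomial 0x8005, initial value 0) with a 16-bit bit reversal before storing the result little-endian at the tail of the command. A standalone sketch of the same computation (hypothetical helper names, not part of the driver):

/* Standalone equivalent of crc16() followed by bitrev16(), as used above. */
static u16 sketch_crc16_reflected(const u8 *data, size_t len)
{
	u16 crc = 0;
	int bit;

	while (len--) {
		crc ^= *data++;
		/* LSB-first form of polynomial 0x8005 is 0xA001 */
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

static u16 sketch_bitrev16(u16 x)
{
	u16 out = 0;
	int bit;

	for (bit = 0; bit < 16; bit++)
		out |= ((x >> bit) & 1) << (15 - bit);
	return out;
}

/*
 * The two CRC bytes appended to a command then correspond to
 * sketch_bitrev16(sketch_crc16_reflected(&cmd->count, cmd->count - CRC_SIZE)),
 * emitted low byte first, matching the cpu_to_le16() store in the driver.
 */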
|
drivers/crypto/atmel-i2c.h (new file, 197 lines):
@@ -0,0 +1,197 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (c) 2017, Microchip Technology Inc.
|
||||
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
*/
|
||||
|
||||
#ifndef __ATMEL_I2C_H__
|
||||
#define __ATMEL_I2C_H__
|
||||
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ATMEL_ECC_PRIORITY 300
|
||||
|
||||
#define COMMAND 0x03 /* packet function */
|
||||
#define SLEEP_TOKEN 0x01
|
||||
#define WAKE_TOKEN_MAX_SIZE 8
|
||||
|
||||
/* Definitions of Data and Command sizes */
|
||||
#define WORD_ADDR_SIZE 1
|
||||
#define COUNT_SIZE 1
|
||||
#define CRC_SIZE 2
|
||||
#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
|
||||
|
||||
/* size in bytes of the n prime */
|
||||
#define ATMEL_ECC_NIST_P256_N_SIZE 32
|
||||
#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
|
||||
|
||||
#define STATUS_RSP_SIZE 4
|
||||
#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
|
||||
#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
|
||||
CMD_OVERHEAD_SIZE)
|
||||
#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
|
||||
#define RANDOM_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
|
||||
#define MAX_RSP_SIZE GENKEY_RSP_SIZE
|
||||
|
||||
/**
|
||||
* atmel_i2c_cmd - structure used for communicating with the device.
|
||||
* @word_addr: indicates the function of the packet sent to the device. This
|
||||
* byte should have a value of COMMAND for normal operation.
|
||||
* @count : number of bytes to be transferred to (or from) the device.
|
||||
* @opcode : the command code.
|
||||
* @param1 : the first parameter; always present.
|
||||
* @param2 : the second parameter; always present.
|
||||
* @data : optional remaining input data. Includes a 2-byte CRC.
|
||||
* @rxsize : size of the data received from i2c client.
|
||||
* @msecs : command execution time in milliseconds
|
||||
*/
|
||||
struct atmel_i2c_cmd {
|
||||
u8 word_addr;
|
||||
u8 count;
|
||||
u8 opcode;
|
||||
u8 param1;
|
||||
__le16 param2;
|
||||
u8 data[MAX_RSP_SIZE];
|
||||
u8 msecs;
|
||||
u16 rxsize;
|
||||
} __packed;
|
||||
|
||||
/* Status/Error codes */
|
||||
#define STATUS_SIZE 0x04
|
||||
#define STATUS_NOERR 0x00
|
||||
#define STATUS_WAKE_SUCCESSFUL 0x11
|
||||
|
||||
static const struct {
|
||||
u8 value;
|
||||
const char *error_text;
|
||||
} error_list[] = {
|
||||
{ 0x01, "CheckMac or Verify miscompare" },
|
||||
{ 0x03, "Parse Error" },
|
||||
{ 0x05, "ECC Fault" },
|
||||
{ 0x0F, "Execution Error" },
|
||||
{ 0xEE, "Watchdog about to expire" },
|
||||
{ 0xFF, "CRC or other communication error" },
|
||||
};
|
||||
|
||||
/* Definitions for eeprom organization */
|
||||
#define CONFIG_ZONE 0
|
||||
|
||||
/* Definitions for Indexes common to all commands */
|
||||
#define RSP_DATA_IDX 1 /* buffer index of data in response */
|
||||
#define DATA_SLOT_2 2 /* used for ECDH private key */
|
||||
|
||||
/* Definitions for the device lock state */
|
||||
#define DEVICE_LOCK_ADDR 0x15
|
||||
#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
|
||||
#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
|
||||
|
||||
/*
|
||||
* Wake High delay to data communication (microseconds). SDA should be stable
|
||||
* high for this entire duration.
|
||||
*/
|
||||
#define TWHI_MIN 1500
|
||||
#define TWHI_MAX 1550
|
||||
|
||||
/* Wake Low duration */
|
||||
#define TWLO_USEC 60
|
||||
|
||||
/* Command execution time (milliseconds) */
|
||||
#define MAX_EXEC_TIME_ECDH 58
|
||||
#define MAX_EXEC_TIME_GENKEY 115
|
||||
#define MAX_EXEC_TIME_READ 1
|
||||
#define MAX_EXEC_TIME_RANDOM 50
|
||||
|
||||
/* Command opcode */
|
||||
#define OPCODE_ECDH 0x43
|
||||
#define OPCODE_GENKEY 0x40
|
||||
#define OPCODE_READ 0x02
|
||||
#define OPCODE_RANDOM 0x1b
|
||||
|
||||
/* Definitions for the READ Command */
|
||||
#define READ_COUNT 7
|
||||
|
||||
/* Definitions for the RANDOM Command */
|
||||
#define RANDOM_COUNT 7
|
||||
|
||||
/* Definitions for the GenKey Command */
|
||||
#define GENKEY_COUNT 7
|
||||
#define GENKEY_MODE_PRIVATE 0x04
|
||||
|
||||
/* Definitions for the ECDH Command */
|
||||
#define ECDH_COUNT 71
|
||||
#define ECDH_PREFIX_MODE 0x00
|
||||
|
||||
/* Used for binding tfm objects to i2c clients. */
|
||||
struct atmel_ecc_driver_data {
|
||||
struct list_head i2c_client_list;
|
||||
spinlock_t i2c_list_lock;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
/**
|
||||
* atmel_i2c_client_priv - i2c_client private data
|
||||
* @client : pointer to i2c client device
|
||||
* @i2c_client_list_node: part of i2c_client_list
|
||||
* @lock : lock for sending i2c commands
|
||||
* @wake_token : wake token array of zeros
|
||||
* @wake_token_sz : size in bytes of the wake_token
|
||||
* @tfm_count : number of active crypto transformations on i2c client
|
||||
*
|
||||
* Reads and writes from/to the i2c client are sequential. The first byte
|
||||
* transmitted to the device is treated as the byte size. Any attempt to send
|
||||
* more than this number of bytes will cause the device to not ACK those bytes.
|
||||
* After the host writes a single command byte to the input buffer, reads are
|
||||
* prohibited until after the device completes command execution. Use a mutex
|
||||
* when sending i2c commands.
|
||||
*/
|
||||
struct atmel_i2c_client_priv {
|
||||
struct i2c_client *client;
|
||||
struct list_head i2c_client_list_node;
|
||||
struct mutex lock;
|
||||
u8 wake_token[WAKE_TOKEN_MAX_SIZE];
|
||||
size_t wake_token_sz;
|
||||
atomic_t tfm_count ____cacheline_aligned;
|
||||
struct hwrng hwrng;
|
||||
};
|
||||
|
||||
/**
|
||||
* atmel_i2c_work_data - data structure representing the work
|
||||
* @ctx : transformation context.
|
||||
* @cbk : pointer to a callback function to be invoked upon completion of this
|
||||
* request. This has the form:
|
||||
* callback(struct atmel_i2c_work_data *work_data, void *areq, u8 status)
|
||||
* where:
|
||||
* @work_data: data structure representing the work
|
||||
* @areq : optional pointer to an argument passed with the original
|
||||
* request.
|
||||
* @status : status returned from the i2c client device or i2c error.
|
||||
* @areq: optional pointer to a user argument for use at callback time.
|
||||
* @work: describes the task to be executed.
|
||||
* @cmd : structure used for communicating with the device.
|
||||
*/
|
||||
struct atmel_i2c_work_data {
|
||||
void *ctx;
|
||||
struct i2c_client *client;
|
||||
void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq,
|
||||
int status);
|
||||
void *areq;
|
||||
struct work_struct work;
|
||||
struct atmel_i2c_cmd cmd;
|
||||
};
|
||||
|
||||
int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
|
||||
|
||||
void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
|
||||
void (*cbk)(struct atmel_i2c_work_data *work_data,
|
||||
void *areq, int status),
|
||||
void *areq);
|
||||
|
||||
int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd);
|
||||
|
||||
void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd);
|
||||
void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd);
|
||||
void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid);
|
||||
int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd,
|
||||
struct scatterlist *pubkey);
|
||||
|
||||
#endif /* __ATMEL_I2C_H__ */
|
drivers/crypto/atmel-sha204a.c (new file, 171 lines):
@@ -0,0 +1,171 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Microchip / Atmel SHA204A (I2C) driver.
|
||||
*
|
||||
* Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"

static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data,
				   void *areq, int status)
{
	struct atmel_i2c_client_priv *i2c_priv = work_data->ctx;
	struct hwrng *rng = areq;

	if (status)
		dev_warn_ratelimited(&i2c_priv->client->dev,
				     "i2c transaction failed (%d)\n",
				     status);

	rng->priv = (unsigned long)work_data;
	atomic_dec(&i2c_priv->tfm_count);
}

static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
					      size_t max)
{
	struct atmel_i2c_client_priv *i2c_priv;
	struct atmel_i2c_work_data *work_data;

	i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);

	/* keep maximum 1 asynchronous read in flight at any time */
	if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1))
		return 0;

	if (rng->priv) {
		work_data = (struct atmel_i2c_work_data *)rng->priv;
		max = min(sizeof(work_data->cmd.data), max);
		memcpy(data, &work_data->cmd.data, max);
		rng->priv = 0;
	} else {
		work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
		if (!work_data)
			return -ENOMEM;

		work_data->ctx = i2c_priv;
		work_data->client = i2c_priv->client;

		max = 0;
	}

	atmel_i2c_init_random_cmd(&work_data->cmd);
	atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng);

	return max;
}

static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
				  bool wait)
{
	struct atmel_i2c_client_priv *i2c_priv;
	struct atmel_i2c_cmd cmd;
	int ret;

	if (!wait)
		return atmel_sha204a_rng_read_nonblocking(rng, data, max);

	i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng);

	atmel_i2c_init_random_cmd(&cmd);

	ret = atmel_i2c_send_receive(i2c_priv->client, &cmd);
	if (ret)
		return ret;

	max = min(sizeof(cmd.data), max);
	memcpy(data, cmd.data, max);

	return max;
}

static int atmel_sha204a_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	struct atmel_i2c_client_priv *i2c_priv;
	int ret;

	ret = atmel_i2c_probe(client, id);
	if (ret)
		return ret;

	i2c_priv = i2c_get_clientdata(client);

	memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng));

	i2c_priv->hwrng.name = dev_name(&client->dev);
	i2c_priv->hwrng.read = atmel_sha204a_rng_read;
	i2c_priv->hwrng.quality = 1024;

	ret = hwrng_register(&i2c_priv->hwrng);
	if (ret)
		dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);

	return ret;
}

static int atmel_sha204a_remove(struct i2c_client *client)
{
	struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);

	if (atomic_read(&i2c_priv->tfm_count)) {
		dev_err(&client->dev, "Device is busy\n");
		return -EBUSY;
	}

	if (i2c_priv->hwrng.priv)
		kfree((void *)i2c_priv->hwrng.priv);
	hwrng_unregister(&i2c_priv->hwrng);

	return 0;
}

static const struct of_device_id atmel_sha204a_dt_ids[] = {
	{ .compatible = "atmel,atsha204a", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);

static const struct i2c_device_id atmel_sha204a_id[] = {
	{ "atsha204a", 0 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);

static struct i2c_driver atmel_sha204a_driver = {
	.probe			= atmel_sha204a_probe,
	.remove			= atmel_sha204a_remove,
	.id_table		= atmel_sha204a_id,

	.driver.name		= "atmel-sha204a",
	.driver.of_match_table	= of_match_ptr(atmel_sha204a_dt_ids),
};

static int __init atmel_sha204a_init(void)
{
	return i2c_add_driver(&atmel_sha204a_driver);
}

static void __exit atmel_sha204a_exit(void)
{
	flush_scheduled_work();
	i2c_del_driver(&atmel_sha204a_driver);
}

module_init(atmel_sha204a_init);
module_exit(atmel_sha204a_exit);

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req)
 * Return: true if incremental hashing is not supported
 *         false otherwise
 */
bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
	struct spu_hw *spu = &iproc_priv.spu;

@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev)
	return 0;
}

int bcm_spu_probe(struct platform_device *pdev)
static int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
@@ -4853,7 +4853,7 @@ failure:
	return err;
}

int bcm_spu_remove(struct platform_device *pdev)
static int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;

@@ -38,21 +38,21 @@ enum spu2_proto_sel {
	SPU2_DTLS_AEAD = 10
};

char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
	"DES", "3DES"
};

char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
	"CCM", "GCM"
static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
	"XTS", "CCM", "GCM"
};

char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
	"SHA3-384", "SHA3-512"
};

char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
	"Rabin", "CCM", "GCM", "Reserved"
};

@@ -2,6 +2,12 @@
config CRYPTO_DEV_FSL_CAAM_COMMON
	tristate

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
	tristate

config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
	tristate

config CRYPTO_DEV_FSL_CAAM
	tristate "Freescale CAAM-Multicore platform driver backend"
	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG
	  Selecting this will enable printing of various debug
	  information in the CAAM driver.

config CRYPTO_DEV_FSL_CAAM_JR
menuconfig CRYPTO_DEV_FSL_CAAM_JR
	tristate "Freescale CAAM Job Ring driver backend"
	default y
	help
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
	  threshold. Range is 1-65535.

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
	tristate "Register algorithm implementations with the Crypto API"
	bool "Register algorithm implementations with the Crypto API"
	default y
	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
	select CRYPTO_AEAD
	select CRYPTO_AUTHENC
	select CRYPTO_BLKCIPHER
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
	  scatterlist crypto API (such as the linux native IPSec
	  stack) to the SEC4 via job ring.

	  To compile this as a module, choose M here: the module
	  will be called caamalg.

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
	tristate "Queue Interface as Crypto API backend"
	bool "Queue Interface as Crypto API backend"
	depends on FSL_DPAA && NET
	default y
	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
	select CRYPTO_AUTHENC
	select CRYPTO_BLKCIPHER
	help
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
	  assigned to the kernel should also be more than the number of
	  job rings.

	  To compile this as a module, choose M here: the module
	  will be called caamalg_qi.

config CRYPTO_DEV_FSL_CAAM_AHASH_API
	tristate "Register hash algorithm implementations with Crypto API"
	bool "Register hash algorithm implementations with Crypto API"
	default y
	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
	select CRYPTO_HASH
	help
	  Selecting this will offload ahash for users of the
	  scatterlist crypto API to the SEC4 via job ring.

	  To compile this as a module, choose M here: the module
	  will be called caamhash.

config CRYPTO_DEV_FSL_CAAM_PKC_API
	tristate "Register public key cryptography implementations with Crypto API"
	bool "Register public key cryptography implementations with Crypto API"
	default y
	select CRYPTO_RSA
	help
	  Selecting this will allow SEC Public key support for RSA.
	  Supported cryptographic primitives: encryption, decryption,
	  signature and verification.
	  To compile this as a module, choose M here: the module
	  will be called caam_pkc.

config CRYPTO_DEV_FSL_CAAM_RNG_API
	tristate "Register caam device for hwrng API"
	bool "Register caam device for hwrng API"
	default y
	select CRYPTO_RNG
	select HW_RANDOM
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
	  Selecting this will register the SEC4 hardware rng to
	  the hw_random API for suppying the kernel entropy pool.

	  To compile this as a module, choose M here: the module
	  will be called caamrng.

endif # CRYPTO_DEV_FSL_CAAM_JR

endif # CRYPTO_DEV_FSL_CAAM
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
	depends on FSL_MC_DPIO
	depends on NETDEVICES
	select CRYPTO_DEV_FSL_CAAM_COMMON
	select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
	select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
	select CRYPTO_BLKCIPHER
	select CRYPTO_AUTHENC
	select CRYPTO_AEAD
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM

	  To compile this as a module, choose M here: the module
	  will be called dpaa2_caam.

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
	def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
		      CRYPTO_DEV_FSL_DPAA2_CAAM)

config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
	def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
		      CRYPTO_DEV_FSL_DPAA2_CAAM)

@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\"
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o

caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
caam-y := ctrl.o
caam_jr-y := jr.o key_gen.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o

caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
	ccflags-y += -DCONFIG_CAAM_QI
	caam-objs += qi.o
endif

obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o

@@ -77,13 +77,6 @@
#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
@@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead,
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
@@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead,
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

	print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
@@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead,
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
@@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

@@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);

@@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
				   OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
@@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
		dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
@@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

@@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

@@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize)
		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
					 ivsize, ivsize, 0);
	if (ivsize) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);

		print_hex_dump_debug("dstiv  @"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     edesc->src_nents > 1 ? 100 : ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	kfree(edesc);

@@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
	struct skcipher_request *req = context;
	struct skcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
	skcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (ivsize) {
		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
		       ivsize);

		print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
				     ivsize, 1);
	}

	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	skcipher_request_complete(req, err);
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req,
	if (unlikely(req->src != req->dst)) {
		if (!edesc->mapped_dst_nents) {
			dst_dma = 0;
			out_options = 0;
		} else if (edesc->mapped_dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req,
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc = edesc->hw_desc;
	u32 *sh_desc;
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req,
	dma_addr_t src_dma, dst_dma, ptr;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	pr_err("asked=%d, cryptlen%d\n",
	print_hex_dump_debug("presciv@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	dev_dbg(jrdev, "asked=%d, cryptlen%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",

	caam_dump_sg("src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);

@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req,
	if (likely(req->src == req->dst)) {
		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
		out_options = in_options;
	} else if (edesc->mapped_dst_nents == 1) {
	} else if (!ivsize && edesc->mapped_dst_nents == 1) {
		dst_dma = sg_dma_address(req->dst);
	} else {
		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req,
		out_options = LDST_SGF;
	}

	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}

/*
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
				src_len);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
						(encrypt ? authsize :
							   (-authsize)));
		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
				dst_len);
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
				src_len);
			return ERR_PTR(src_nents);
		}
	}
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
		}
	}

	/*
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
		sg_to_sec4_sg_last(req->src, src_len,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
		sg_to_sec4_sg_last(req->dst, dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req)

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req)

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req)

	/* Create and submit job descriptor*/
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req)
	u32 *desc;
	int ret = 0;

	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
	caam_dump_sg("dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req)

	/* Create and submit job descriptor*/
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
	else
		sec4_sg_ents = mapped_src_nents + !!ivsize;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (output S/G)
	 *      pad output S/G, if needed
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	if (ivsize || mapped_dst_nents > 1) {
		if (req->src == req->dst)
			sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents);
		else
			sec4_sg_ents += pad_sg_nents(mapped_dst_nents +
						     !!ivsize);
	} else {
		sec4_sg_ents = pad_sg_nents(sec4_sg_ents);
	}

	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/*
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,

	/* Make sure IV is located in a DMAable area */
	if (ivsize) {
		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
		iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(jrdev, iv_dma)) {
			dev_err(jrdev, "unable to map IV\n");
			caam_unmap(jrdev, req->src, req->dst, src_nents,
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	}
	if (dst_sg_idx)
		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
				   !!ivsize, 0);
	sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg +
		      !!ivsize, 0);

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}
	if (req->src != req->dst && (ivsize || mapped_dst_nents > 1))
		sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg +
			      dst_sg_idx, 0);

	if (ivsize)
		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx +
				   mapped_dst_nents, iv_dma, ivsize, 0);

	if (ivsize || mapped_dst_nents > 1)
		sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
				    mapped_dst_nents);

	if (sec4_sg_bytes) {
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif
	print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     sec4_sg_bytes, 1);

	return edesc;
}
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req)

	/* Create and submit job descriptor*/
	init_skcipher_job(req, edesc, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);

@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	if (ivsize)
		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
					 ivsize, ivsize, 0);

	/* Create and submit job descriptor*/
	init_skcipher_job(req, edesc, false);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
			     desc_bytes(edesc->hw_desc), 1);

	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
	if (!ret) {
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
void caam_algapi_exit(void)
{
	int i;

@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
	alg->exit = caam_aead_exit;
}

static int __init caam_algapi_init(void)
int caam_algapi_init(struct device *ctrldev)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct caam_drv_private *priv;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
	u32 arc4_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false, gcm_support;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	priv = dev_get_drvdata(&pdev->dev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}


	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void)
	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

out_put_dev:
	put_device(&pdev->dev);
	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");

@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type)
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT |
			 OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);

@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);

@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_encap);

@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_decap);

@@ -613,11 +606,9 @@ copy_iv:
	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead givenc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);

@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);

@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);

@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);

@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);

@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);

@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);

@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
			 LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
	append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	skcipher_append_src_dst(desc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "skcipher enc shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	/* Store IV */
	if (ivsize)
		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
				 LDST_OFFSET_SHIFT));

	print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);

@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,

	/* Choose operation */
	if (ctx1_iv_off)
		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
		append_operation(desc, cdata->algtype | OP_ALG_AS_INIT |
				 OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, cdata->algtype);
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
	/* Perform operation */
	skcipher_append_src_dst(desc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "skcipher dec shdesc@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	/* Store IV */
	if (ivsize)
		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
				 LDST_CLASS_1_CCB | (ctx1_iv_off <<
				 LDST_OFFSET_SHIFT));

	print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);

@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
	/* Perform operation */
	skcipher_append_src_dst(desc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	/* Store upper 8B of IV */
	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (0x20 << LDST_OFFSET_SHIFT));

	print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4,
			     desc, desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);

@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
	/* Perform operation */
	skcipher_append_src_dst(desc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
	/* Store upper 8B of IV */
	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (0x20 << LDST_OFFSET_SHIFT));

	print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);

@@ -44,9 +44,9 @@

#define DESC_SKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_ENC_LEN		(DESC_SKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
					 21 * CAAM_CMD_SZ)
#define DESC_SKCIPHER_DEC_LEN		(DESC_SKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)
					 16 * CAAM_CMD_SZ)

void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
				 unsigned int icvsize, int era);

@@ -4,7 +4,7 @@
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2018 NXP
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead,
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead,
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead,
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev,
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
				     (1 + !!ivsize) * sizeof(*sg_table),
				     out_len, 0);
	} else if (mapped_dst_nents == 1) {
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif
	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
					 ivsize, ivsize, 0);
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);

	qi_cache_free(edesc);
|
||||
skcipher_request_complete(req, status);
|
||||
@@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
qm_sg_ents = 1 + mapped_src_nents;
|
||||
dst_sg_idx = qm_sg_ents;
|
||||
|
||||
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
|
||||
/*
|
||||
* Input, output HW S/G tables: [IV, src][dst, IV]
|
||||
* IV entries point to the same buffer
|
||||
* If src == dst, S/G entries are reused (S/G tables overlap)
|
||||
*
|
||||
* HW reads 4 S/G entries at a time; make sure the reads don't go beyond
|
||||
* the end of the table by allocating more S/G entries.
|
||||
*/
|
||||
if (req->src != req->dst)
|
||||
qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
|
||||
else
|
||||
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
|
||||
|
||||
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
|
||||
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
|
||||
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
|
||||
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
|
||||
qm_sg_ents, ivsize);
|
||||
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
if (unlikely(!edesc)) {
|
||||
dev_err(qidev, "could not allocate extended descriptor\n");
|
||||
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
iv = (u8 *)(sg_table + qm_sg_ents);
|
||||
memcpy(iv, req->iv, ivsize);
|
||||
|
||||
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
|
||||
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(qidev, iv_dma)) {
|
||||
dev_err(qidev, "unable to map IV\n");
|
||||
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
edesc->drv_req.drv_ctx = drv_ctx;
|
||||
|
||||
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
|
||||
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
|
||||
sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
|
||||
|
||||
if (mapped_dst_nents > 1)
|
||||
sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
|
||||
dst_sg_idx, 0);
|
||||
if (req->src != req->dst)
|
||||
sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
|
||||
|
||||
dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
|
||||
ivsize, 0);
|
||||
|
||||
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
|
||||
dev_err(qidev, "unable to map S/G table\n");
|
||||
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
|
||||
iv_dma, ivsize, 0, 0);
|
||||
iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
|
||||
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
|
||||
ivsize + req->cryptlen, 0);
|
||||
|
||||
if (req->src == req->dst) {
|
||||
if (req->src == req->dst)
|
||||
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
|
||||
sizeof(*sg_table), req->cryptlen, 0);
|
||||
} else if (mapped_dst_nents > 1) {
|
||||
sizeof(*sg_table), req->cryptlen + ivsize,
|
||||
0);
|
||||
else
|
||||
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
|
||||
sizeof(*sg_table), req->cryptlen, 0);
|
||||
} else {
|
||||
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
|
||||
req->cryptlen, 0);
|
||||
}
|
||||
sizeof(*sg_table), req->cryptlen + ivsize,
|
||||
0);
|
||||
|
||||
return edesc;
|
||||
}
|
||||
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
||||
struct skcipher_edesc *edesc;
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
int ret;
|
||||
|
||||
if (unlikely(caam_congested))
|
||||
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/*
|
||||
* The crypto API expects us to set the IV (req->iv) to the last
|
||||
* ciphertext block.
|
||||
*/
|
||||
if (!encrypt)
|
||||
scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
|
||||
ivsize, ivsize, 0);
|
||||
|
||||
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
|
||||
if (!ret) {
|
||||
ret = -EINPROGRESS;
|
||||
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
bool uses_dkp)
|
||||
{
|
||||
struct caam_drv_private *priv;
|
||||
struct device *dev;
|
||||
|
||||
/*
|
||||
* distribute tfms across job rings to ensure in-order
|
||||
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
return PTR_ERR(ctx->jrdev);
|
||||
}
|
||||
|
||||
priv = dev_get_drvdata(ctx->jrdev->parent);
|
||||
dev = ctx->jrdev->parent;
|
||||
priv = dev_get_drvdata(dev);
|
||||
if (priv->era >= 6 && uses_dkp)
|
||||
ctx->dir = DMA_BIDIRECTIONAL;
|
||||
else
|
||||
ctx->dir = DMA_TO_DEVICE;
|
||||
|
||||
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
|
||||
ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
|
||||
ctx->dir);
|
||||
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
|
||||
dev_err(ctx->jrdev, "unable to map key\n");
|
||||
if (dma_mapping_error(dev, ctx->key_dma)) {
|
||||
dev_err(dev, "unable to map key\n");
|
||||
caam_jr_free(ctx->jrdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
|
||||
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
|
||||
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
|
||||
|
||||
ctx->qidev = priv->qidev;
|
||||
ctx->qidev = dev;
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
ctx->drv_ctx[ENCRYPT] = NULL;
|
||||
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx)
|
||||
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
|
||||
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
|
||||
|
||||
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
|
||||
dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
|
||||
ctx->dir);
|
||||
|
||||
caam_jr_free(ctx->jrdev);
|
||||
}
|
||||
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm)
|
||||
caam_exit_common(crypto_aead_ctx(tfm));
|
||||
}
|
||||
|
||||
static void __exit caam_qi_algapi_exit(void)
|
||||
void caam_qi_algapi_exit(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
|
||||
alg->exit = caam_aead_exit;
|
||||
}
|
||||
|
||||
static int __init caam_qi_algapi_init(void)
|
||||
int caam_qi_algapi_init(struct device *ctrldev)
|
||||
{
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
struct caam_drv_private *priv;
|
||||
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||
int i = 0, err = 0;
|
||||
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
|
||||
unsigned int md_limit = SHA512_DIGEST_SIZE;
|
||||
bool registered = false;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
of_node_put(dev_node);
|
||||
if (!pdev)
|
||||
return -ENODEV;
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
priv = dev_get_drvdata(ctrldev);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv || !priv->qi_present) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
|
||||
if (caam_dpaa2) {
|
||||
dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void)
|
||||
|
||||
err = crypto_register_skcipher(&t_alg->skcipher);
|
||||
if (err) {
|
||||
dev_warn(priv->qidev, "%s alg registration failed\n",
|
||||
dev_warn(ctrldev, "%s alg registration failed\n",
|
||||
t_alg->skcipher.base.cra_driver_name);
|
||||
continue;
|
||||
}
|
||||
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void)
|
||||
}
|
||||
|
||||
if (registered)
|
||||
dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
|
||||
dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
|
||||
|
||||
out_put_dev:
|
||||
put_device(ctrldev);
|
||||
return err;
|
||||
}
|
||||
|
||||
module_init(caam_qi_algapi_init);
|
||||
module_exit(caam_qi_algapi_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
|
||||
MODULE_AUTHOR("Freescale Semiconductor");
|
||||
|
@@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
|
||||
/*
|
||||
* Copyright 2015-2016 Freescale Semiconductor Inc.
|
||||
* Copyright 2017-2018 NXP
|
||||
* Copyright 2017-2019 NXP
|
||||
*/
|
||||
|
||||
#include "compat.h"
|
||||
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
|
||||
static void caam_unmap(struct device *dev, struct scatterlist *src,
|
||||
struct scatterlist *dst, int src_nents,
|
||||
int dst_nents, dma_addr_t iv_dma, int ivsize,
|
||||
dma_addr_t qm_sg_dma, int qm_sg_bytes)
|
||||
enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
|
||||
int qm_sg_bytes)
|
||||
{
|
||||
if (dst != src) {
|
||||
if (src_nents)
|
||||
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
|
||||
}
|
||||
|
||||
if (iv_dma)
|
||||
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
|
||||
dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
|
||||
|
||||
if (qm_sg_bytes)
|
||||
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
|
||||
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
|
||||
int src_len, dst_len = 0;
|
||||
struct aead_edesc *edesc;
|
||||
dma_addr_t qm_sg_dma, iv_dma = 0;
|
||||
int ivsize = 0;
|
||||
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
}
|
||||
|
||||
if (unlikely(req->dst != req->src)) {
|
||||
src_nents = sg_nents_for_len(req->src, req->assoclen +
|
||||
req->cryptlen);
|
||||
src_len = req->assoclen + req->cryptlen;
|
||||
dst_len = src_len + (encrypt ? authsize : (-authsize));
|
||||
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (unlikely(src_nents < 0)) {
|
||||
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
|
||||
req->assoclen + req->cryptlen);
|
||||
src_len);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(src_nents);
|
||||
}
|
||||
|
||||
dst_nents = sg_nents_for_len(req->dst, req->assoclen +
|
||||
req->cryptlen +
|
||||
(encrypt ? authsize :
|
||||
(-authsize)));
|
||||
dst_nents = sg_nents_for_len(req->dst, dst_len);
|
||||
if (unlikely(dst_nents < 0)) {
|
||||
dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
|
||||
req->assoclen + req->cryptlen +
|
||||
(encrypt ? authsize : (-authsize)));
|
||||
dst_len);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(dst_nents);
|
||||
}
|
||||
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
mapped_dst_nents = 0;
|
||||
}
|
||||
} else {
|
||||
src_nents = sg_nents_for_len(req->src, req->assoclen +
|
||||
req->cryptlen +
|
||||
(encrypt ? authsize : 0));
|
||||
src_len = req->assoclen + req->cryptlen +
|
||||
(encrypt ? authsize : 0);
|
||||
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (unlikely(src_nents < 0)) {
|
||||
dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
|
||||
req->assoclen + req->cryptlen +
|
||||
(encrypt ? authsize : 0));
|
||||
src_len);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(src_nents);
|
||||
}
|
||||
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
/*
|
||||
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
|
||||
* Input is not contiguous.
|
||||
* HW reads 4 S/G entries at a time; make sure the reads don't go beyond
|
||||
* the end of the table by allocating more S/G entries. Logic:
|
||||
* if (src != dst && output S/G)
|
||||
* pad output S/G, if needed
|
||||
* else if (src == dst && S/G)
|
||||
* overlapping S/Gs; pad one of them
|
||||
* else if (input S/G) ...
|
||||
* pad input S/G, if needed
|
||||
*/
|
||||
qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
|
||||
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
|
||||
qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
|
||||
if (mapped_dst_nents > 1)
|
||||
qm_sg_nents += pad_sg_nents(mapped_dst_nents);
|
||||
else if ((req->src == req->dst) && (mapped_src_nents > 1))
|
||||
qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
|
||||
1 + !!ivsize +
|
||||
pad_sg_nents(mapped_src_nents));
|
||||
else
|
||||
qm_sg_nents = pad_sg_nents(qm_sg_nents);
|
||||
|
||||
sg_table = &edesc->sgt[0];
|
||||
qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
|
||||
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
|
||||
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
|
||||
qm_sg_nents, ivsize);
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
if (dma_mapping_error(dev, iv_dma)) {
|
||||
dev_err(dev, "unable to map IV\n");
|
||||
caam_unmap(dev, req->src, req->dst, src_nents,
|
||||
dst_nents, 0, 0, 0, 0);
|
||||
dst_nents, 0, 0, DMA_NONE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
|
||||
dev_err(dev, "unable to map assoclen\n");
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
|
||||
iv_dma, ivsize, 0, 0);
|
||||
iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
|
||||
qm_sg_index++;
|
||||
}
|
||||
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
|
||||
sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
|
||||
qm_sg_index += mapped_src_nents;
|
||||
|
||||
if (mapped_dst_nents > 1)
|
||||
sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
|
||||
qm_sg_index, 0);
|
||||
sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
|
||||
|
||||
qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, qm_sg_dma)) {
|
||||
dev_err(dev, "unable to map S/G table\n");
|
||||
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
|
||||
iv_dma, ivsize, 0, 0);
|
||||
iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
dpaa2_fl_set_addr(out_fle, qm_sg_dma +
|
||||
(1 + !!ivsize) * sizeof(*sg_table));
|
||||
}
|
||||
} else if (!mapped_dst_nents) {
|
||||
/*
|
||||
* crypto engine requires the output entry to be present when
|
||||
* "frame list" FD is used.
|
||||
* Since engine does not support FMT=2'b11 (unused entry type),
|
||||
* leaving out_fle zeroized is the best option.
|
||||
*/
|
||||
goto skip_out_fle;
|
||||
} else if (mapped_dst_nents == 1) {
|
||||
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
|
||||
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
|
||||
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
|
||||
dpaa2_fl_set_len(out_fle, out_len);
|
||||
|
||||
skip_out_fle:
|
||||
return edesc;
|
||||
}
|
||||
|
||||
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
|
||||
qm_sg_ents = 1 + mapped_src_nents;
|
||||
dst_sg_idx = qm_sg_ents;
|
||||
|
||||
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
|
||||
/*
|
||||
* Input, output HW S/G tables: [IV, src][dst, IV]
|
||||
* IV entries point to the same buffer
|
||||
* If src == dst, S/G entries are reused (S/G tables overlap)
|
||||
*
|
||||
* HW reads 4 S/G entries at a time; make sure the reads don't go beyond
|
||||
* the end of the table by allocating more S/G entries.
|
||||
*/
|
||||
if (req->src != req->dst)
|
||||
qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
|
||||
else
|
||||
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
|
||||
|
||||
qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
|
||||
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
|
||||
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
|
||||
dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
|
||||
qm_sg_ents, ivsize);
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
|
||||
if (unlikely(!edesc)) {
|
||||
dev_err(dev, "could not allocate extended descriptor\n");
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
|
||||
iv = (u8 *)(sg_table + qm_sg_ents);
|
||||
memcpy(iv, req->iv, ivsize);
|
||||
|
||||
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
|
||||
iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(dev, iv_dma)) {
|
||||
dev_err(dev, "unable to map IV\n");
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
|
||||
0, 0, 0);
|
||||
0, DMA_NONE, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
|
||||
edesc->qm_sg_bytes = qm_sg_bytes;
|
||||
|
||||
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
|
||||
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
|
||||
sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
|
||||
|
||||
if (mapped_dst_nents > 1)
|
||||
sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
|
||||
dst_sg_idx, 0);
|
||||
if (req->src != req->dst)
|
||||
sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
|
||||
|
||||
dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
|
||||
ivsize, 0);
|
||||
|
||||
edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
|
||||
dev_err(dev, "unable to map S/G table\n");
|
||||
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
|
||||
iv_dma, ivsize, 0, 0);
|
||||
iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
|
||||
qi_cache_free(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
|
||||
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
|
||||
dpaa2_fl_set_final(in_fle, true);
|
||||
dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
|
||||
dpaa2_fl_set_len(out_fle, req->cryptlen);
|
||||
dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
|
||||
|
||||
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
|
||||
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
|
||||
|
||||
if (req->src == req->dst) {
|
||||
dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
|
||||
dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
|
||||
|
||||
if (req->src == req->dst)
|
||||
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
|
||||
sizeof(*sg_table));
|
||||
} else if (mapped_dst_nents > 1) {
|
||||
dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
|
||||
else
|
||||
dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
|
||||
sizeof(*sg_table));
|
||||
} else {
|
||||
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
|
||||
dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
|
||||
}
|
||||
|
||||
return edesc;
|
||||
}
|
||||
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
|
||||
int ivsize = crypto_aead_ivsize(aead);
|
||||
|
||||
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
|
||||
edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
|
||||
edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
|
||||
edesc->qm_sg_bytes);
|
||||
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
|
||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
|
||||
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
|
||||
edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
|
||||
edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
|
||||
edesc->qm_sg_bytes);
|
||||
}
|
||||
|
||||
static void aead_encrypt_done(void *cbk_ctx, u32 status)
|
||||
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
|
||||
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
||||
edesc->src_nents > 1 ? 100 : ivsize, 1);
|
||||
caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
|
||||
caam_dump_sg("dst @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
||||
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
||||
|
||||
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
|
||||
|
||||
/*
|
||||
* The crypto API expects us to set the IV (req->iv) to the last
|
||||
* ciphertext block. This is used e.g. by the CTS mode.
|
||||
* ciphertext block (CBC mode) or last counter (CTR mode).
|
||||
* This is used e.g. by the CTS mode.
|
||||
*/
|
||||
scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
|
||||
ivsize, 0);
|
||||
memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
|
||||
|
||||
qi_cache_free(edesc);
|
||||
skcipher_request_complete(req, ecode);
|
||||
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
|
||||
print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
|
||||
edesc->src_nents > 1 ? 100 : ivsize, 1);
|
||||
caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
|
||||
caam_dump_sg("dst @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
|
||||
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
|
||||
|
||||
skcipher_unmap(ctx->dev, edesc, req);
|
||||
|
||||
/*
|
||||
* The crypto API expects us to set the IV (req->iv) to the last
|
||||
* ciphertext block (CBC mode) or last counter (CTR mode).
|
||||
* This is used e.g. by the CTS mode.
|
||||
*/
|
||||
memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
|
||||
|
||||
qi_cache_free(edesc);
|
||||
skcipher_request_complete(req, ecode);
|
||||
}
|
||||
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
|
||||
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
|
||||
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
|
||||
struct caam_request *caam_req = skcipher_request_ctx(req);
|
||||
int ivsize = crypto_skcipher_ivsize(skcipher);
|
||||
int ret;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
/*
|
||||
* The crypto API expects us to set the IV (req->iv) to the last
|
||||
* ciphertext block.
|
||||
*/
|
||||
scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
|
||||
ivsize, 0);
|
||||
|
||||
caam_req->flc = &ctx->flc[DECRYPT];
|
||||
caam_req->flc_dma = ctx->flc_dma[DECRYPT];
|
||||
caam_req->cbk = skcipher_decrypt_done;
|
||||
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
|
||||
if (to_hash) {
|
||||
struct dpaa2_sg_entry *sg_table;
|
||||
int src_len = req->nbytes - *next_buflen;
|
||||
|
||||
src_nents = sg_nents_for_len(req->src,
|
||||
req->nbytes - (*next_buflen));
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (src_nents < 0) {
|
||||
dev_err(ctx->dev, "Invalid number of src SG.\n");
|
||||
return src_nents;
|
||||
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
|
||||
edesc->src_nents = src_nents;
|
||||
qm_sg_src_index = 1 + (*buflen ? 1 : 0);
|
||||
qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
|
||||
qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
|
||||
sizeof(*sg_table);
|
||||
sg_table = &edesc->sgt[0];
|
||||
|
||||
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
goto unmap_ctx;
|
||||
|
||||
if (mapped_nents) {
|
||||
sg_to_qm_sg_last(req->src, mapped_nents,
|
||||
sg_to_qm_sg_last(req->src, src_len,
|
||||
sg_table + qm_sg_src_index, 0);
|
||||
if (*next_buflen)
|
||||
scatterwalk_map_and_copy(next_buf, req->src,
|
||||
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int buflen = *current_buflen(state);
|
||||
int qm_sg_bytes, qm_sg_src_index;
|
||||
int qm_sg_bytes;
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct ahash_edesc *edesc;
|
||||
struct dpaa2_sg_entry *sg_table;
|
||||
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
if (!edesc)
|
||||
return -ENOMEM;
|
||||
|
||||
qm_sg_src_index = 1 + (buflen ? 1 : 0);
|
||||
qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
|
||||
qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
|
||||
sg_table = &edesc->sgt[0];
|
||||
|
||||
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
|
||||
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
|
||||
dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
|
||||
|
||||
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
|
||||
edesc->src_nents = src_nents;
|
||||
qm_sg_src_index = 1 + (buflen ? 1 : 0);
|
||||
qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
|
||||
qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
|
||||
sizeof(*sg_table);
|
||||
sg_table = &edesc->sgt[0];
|
||||
|
||||
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
|
||||
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
|
||||
sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
|
||||
|
||||
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
|
||||
int qm_sg_bytes;
|
||||
struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
|
||||
|
||||
qm_sg_bytes = mapped_nents * sizeof(*sg_table);
|
||||
sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
|
||||
qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
|
||||
sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
|
||||
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
|
||||
qm_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
|
||||
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
|
||||
if (to_hash) {
|
||||
struct dpaa2_sg_entry *sg_table;
|
||||
int src_len = req->nbytes - *next_buflen;
|
||||
|
||||
src_nents = sg_nents_for_len(req->src,
|
||||
req->nbytes - *next_buflen);
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (src_nents < 0) {
|
||||
dev_err(ctx->dev, "Invalid number of src SG.\n");
|
||||
return src_nents;
|
||||
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
}
|
||||
|
||||
edesc->src_nents = src_nents;
|
||||
qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
|
||||
qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
|
||||
sizeof(*sg_table);
|
||||
sg_table = &edesc->sgt[0];
|
||||
|
||||
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
|
||||
sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
|
||||
|
||||
if (*next_buflen)
|
||||
scatterwalk_map_and_copy(next_buf, req->src,
|
||||
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
}
|
||||
|
||||
edesc->src_nents = src_nents;
|
||||
qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
|
||||
qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
|
||||
sg_table = &edesc->sgt[0];
|
||||
|
||||
ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
|
||||
if (ret)
|
||||
goto unmap;
|
||||
|
||||
sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
|
||||
sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
|
||||
|
||||
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
|
||||
if (to_hash) {
|
||||
struct dpaa2_sg_entry *sg_table;
|
||||
int src_len = req->nbytes - *next_buflen;
|
||||
|
||||
src_nents = sg_nents_for_len(req->src,
|
||||
req->nbytes - (*next_buflen));
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (src_nents < 0) {
|
||||
dev_err(ctx->dev, "Invalid number of src SG.\n");
|
||||
return src_nents;
|
||||
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
if (mapped_nents > 1) {
|
||||
int qm_sg_bytes;
|
||||
|
||||
sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
|
||||
qm_sg_bytes = mapped_nents * sizeof(*sg_table);
|
||||
sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
|
||||
qm_sg_bytes = pad_sg_nents(mapped_nents) *
|
||||
sizeof(*sg_table);
|
||||
edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
|
||||
qm_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
|
@@ -82,14 +82,6 @@
|
||||
#define HASH_MSG_LEN 8
|
||||
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
|
||||
|
||||
#ifdef DEBUG
|
||||
/* for print_hex_dumps with line references */
|
||||
#define debug(format, arg...) printk(format, arg)
|
||||
#else
|
||||
#define debug(format, arg...)
|
||||
#endif
|
||||
|
||||
|
||||
static struct list_head hash_list;
|
||||
|
||||
/* ahash per-session context */
|
||||
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, true, ctrlpriv->era);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"ahash update shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
/* ahash_update_first shared descriptor */
|
||||
desc = ctx->sh_desc_update_first;
|
||||
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, false, ctrlpriv->era);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"ahash update first shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
|
||||
": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
/* ahash_final shared descriptor */
|
||||
desc = ctx->sh_desc_fin;
|
||||
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, true, ctrlpriv->era);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
/* ahash_digest shared descriptor */
|
||||
desc = ctx->sh_desc_digest;
|
||||
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, false, ctrlpriv->era);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"ahash digest shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, ctx->key_dma);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
|
||||
" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
/* shared descriptor for ahash_digest */
|
||||
desc = ctx->sh_desc_digest;
|
||||
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash)
|
||||
ctx->ctx_len, 0);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
|
||||
" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
/* shared descriptor for ahash_digest */
|
||||
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
|
||||
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
|
||||
LDST_SRCDST_BYTE_CONTEXT);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
result.err = 0;
|
||||
init_completion(&result.completion);
|
||||
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
|
||||
/* in progress */
|
||||
wait_for_completion(&result.completion);
|
||||
ret = result.err;
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR,
|
||||
"digested key@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key,
|
||||
digestsize, 1);
|
||||
}
|
||||
dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
|
||||
|
||||
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
|
||||
int ret;
|
||||
u8 *hashed_key = NULL;
|
||||
|
||||
#ifdef DEBUG
|
||||
printk(KERN_ERR "keylen %d\n", keylen);
|
||||
#endif
|
||||
dev_dbg(jrdev, "keylen %d\n", keylen);
|
||||
|
||||
if (keylen > blocksize) {
|
||||
hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
|
||||
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
#ifdef DEBUG
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
|
||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
#endif
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
|
||||
memcpy(req->result, state->caam_ctx, digestsize);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
}
|
||||
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
#ifdef DEBUG
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
|
||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
#endif
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
|
||||
switch_buf(state);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
if (req->result)
|
||||
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
||||
digestsize, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("result@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
||||
digestsize, 1);
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
}
|
||||
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
#ifdef DEBUG
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
|
||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
#endif
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
||||
memcpy(req->result, state->caam_ctx, digestsize);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
}
|
||||
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
|
||||
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
|
||||
struct caam_hash_state *state = ahash_request_ctx(req);
|
||||
#ifdef DEBUG
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
|
||||
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
#endif
|
||||
dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
|
||||
if (err)
|
||||
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
|
||||
switch_buf(state);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
|
||||
ctx->ctx_len, 1);
|
||||
if (req->result)
|
||||
print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
||||
digestsize, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("result@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
|
||||
digestsize, 1);
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
}
|
||||
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
|
||||
|
||||
if (nents > 1 || first_sg) {
|
||||
struct sec4_sg_entry *sg = edesc->sec4_sg;
|
||||
unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
|
||||
unsigned int sgsize = sizeof(*sg) *
|
||||
pad_sg_nents(first_sg + nents);
|
||||
|
||||
sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
|
||||
sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
|
||||
|
||||
src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(ctx->jrdev, src_dma)) {
|
||||
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
}
|
||||
|
||||
if (to_hash) {
|
||||
src_nents = sg_nents_for_len(req->src,
|
||||
req->nbytes - (*next_buflen));
|
||||
int pad_nents;
|
||||
int src_len = req->nbytes - *next_buflen;
|
||||
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (src_nents < 0) {
|
||||
dev_err(jrdev, "Invalid number of src SG.\n");
|
||||
return src_nents;
|
||||
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
}
|
||||
|
||||
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
|
||||
sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
|
||||
sizeof(struct sec4_sg_entry);
|
||||
pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
|
||||
sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
|
||||
|
||||
/*
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
|
||||
ctx->sh_desc_update,
|
||||
edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
|
||||
ctx->sh_desc_update_dma, flags);
|
||||
if (!edesc) {
|
||||
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
goto unmap_ctx;
|
||||
|
||||
if (mapped_nents)
|
||||
sg_to_sec4_sg_last(req->src, mapped_nents,
|
||||
sg_to_sec4_sg_last(req->src, src_len,
|
||||
edesc->sec4_sg + sec4_sg_src_index,
|
||||
0);
|
||||
else
|
||||
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
|
||||
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
|
||||
if (ret)
|
||||
@@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
*buflen = *next_buflen;
|
||||
*next_buflen = last_buflen;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
||||
*next_buflen, 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
||||
*next_buflen, 1);
|
||||
|
||||
return ret;
|
||||
unmap_ctx:
|
||||
@@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
int buflen = *current_buflen(state);
|
||||
u32 *desc;
|
||||
int sec4_sg_bytes, sec4_sg_src_index;
|
||||
int sec4_sg_bytes;
|
||||
int digestsize = crypto_ahash_digestsize(ahash);
|
||||
struct ahash_edesc *edesc;
|
||||
int ret;
|
||||
|
||||
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
|
||||
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
|
||||
sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
|
||||
sizeof(struct sec4_sg_entry);
|
||||
|
||||
/* allocate space for base edesc and hw desc commands, link tables */
|
||||
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
|
||||
ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
|
||||
flags);
|
||||
edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
|
||||
ctx->sh_desc_fin_dma, flags);
|
||||
if (!edesc)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
|
||||
sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
@@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
LDST_SGF);
|
||||
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||
if (ret)
|
||||
@@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
|
||||
append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
|
||||
if (ret)
|
||||
@@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
@@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
@@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
}
|
||||
|
||||
if (to_hash) {
|
||||
src_nents = sg_nents_for_len(req->src,
|
||||
req->nbytes - *next_buflen);
|
||||
int pad_nents;
|
||||
int src_len = req->nbytes - *next_buflen;
|
||||
|
||||
src_nents = sg_nents_for_len(req->src, src_len);
|
||||
if (src_nents < 0) {
|
||||
dev_err(jrdev, "Invalid number of src SG.\n");
|
||||
return src_nents;
|
||||
@@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
mapped_nents = 0;
|
||||
}
|
||||
|
||||
sec4_sg_bytes = (1 + mapped_nents) *
|
||||
sizeof(struct sec4_sg_entry);
|
||||
pad_nents = pad_sg_nents(1 + mapped_nents);
|
||||
sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
|
||||
|
||||
/*
|
||||
* allocate space for base edesc and hw desc commands,
|
||||
* link tables
|
||||
*/
|
||||
edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
|
||||
edesc = ahash_edesc_alloc(ctx, pad_nents,
|
||||
ctx->sh_desc_update_first,
|
||||
ctx->sh_desc_update_first_dma,
|
||||
flags);
|
||||
@@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
sg_to_sec4_sg_last(req->src, mapped_nents,
|
||||
edesc->sec4_sg + 1, 0);
|
||||
sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
|
||||
|
||||
if (*next_buflen) {
|
||||
scatterwalk_map_and_copy(next_buf, req->src,
|
||||
@@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||
if (ret)
|
||||
@@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
*buflen = *next_buflen;
|
||||
*next_buflen = 0;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
||||
*next_buflen, 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
|
||||
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
|
||||
1);
|
||||
|
||||
return ret;
|
||||
unmap_ctx:
|
||||
@@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
|
||||
if (!ret) {
|
||||
@@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
if (ret)
|
||||
goto unmap_ctx;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc,
|
||||
desc_bytes(desc), 1);
|
||||
|
||||
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
|
||||
if (ret)
|
||||
@@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
req->nbytes, 0);
|
||||
switch_buf(state);
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
|
||||
*next_buflen, 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
|
||||
1);
|
||||
|
||||
return ret;
|
||||
unmap_ctx:
|
||||
@@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
|
||||
caam_jr_free(ctx->jrdev);
|
||||
}
|
||||
|
||||
static void __exit caam_algapi_hash_exit(void)
|
||||
void caam_algapi_hash_exit(void)
|
||||
{
|
||||
struct caam_hash_alg *t_alg, *n;
|
||||
|
||||
@@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template,
|
||||
return t_alg;
|
||||
}
|
||||
|
||||
static int __init caam_algapi_hash_init(void)
|
||||
int caam_algapi_hash_init(struct device *ctrldev)
|
||||
{
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
int i = 0, err = 0;
|
||||
struct caam_drv_private *priv;
|
||||
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||
unsigned int md_limit = SHA512_DIGEST_SIZE;
|
||||
u32 md_inst, md_vid;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = dev_get_drvdata(&pdev->dev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
|
||||
/*
|
||||
* Register crypto algorithms the device supports. First, identify
|
||||
* presence and attributes of MD block.
|
||||
@@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void)
|
||||
* Skip registration of any hashing algorithms if MD block
|
||||
* is not present.
|
||||
*/
|
||||
if (!md_inst) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
if (!md_inst)
|
||||
return -ENODEV;
|
||||
|
||||
/* Limit digest size based on LP256 */
|
||||
if (md_vid == CHA_VER_VID_MD_LP256)
|
||||
@@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void)
|
||||
list_add_tail(&t_alg->entry, &hash_list);
|
||||
}
|
||||
|
||||
out_put_dev:
|
||||
put_device(&pdev->dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
module_init(caam_algapi_hash_init);
|
||||
module_exit(caam_algapi_hash_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
|
||||
MODULE_AUTHOR("Freescale Semiconductor - NMG");
|
||||
|
@@ -3,7 +3,7 @@
|
||||
* caam - Freescale FSL CAAM support for Public Key Cryptography
|
||||
*
|
||||
* Copyright 2016 Freescale Semiconductor, Inc.
|
||||
* Copyright 2018 NXP
|
||||
* Copyright 2018-2019 NXP
|
||||
*
|
||||
* There is no Shared Descriptor for PKC so that the Job Descriptor must carry
|
||||
* all the desired key parameters, input and output pointers.
|
||||
@@ -24,12 +24,18 @@
|
||||
sizeof(struct rsa_priv_f2_pdb))
|
||||
#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
|
||||
sizeof(struct rsa_priv_f3_pdb))
|
||||
#define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */
|
||||
|
||||
/* buffer filled with zeros, used for padding */
|
||||
static u8 *zero_buffer;
|
||||
|
||||
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
|
||||
struct akcipher_request *req)
|
||||
{
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
|
||||
dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
|
||||
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
|
||||
dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
|
||||
|
||||
if (edesc->sec4_sg_bytes)
|
||||
dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
|
||||
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
|
||||
akcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
/**
 * Count the leading zeros that need to be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
unsigned int nbytes,
unsigned int flags)
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
|
||||
lzeros = 0;
|
||||
len = 0;
|
||||
while (nbytes > 0) {
|
||||
while (len && !*buff) {
|
||||
/* do not strip more than given bytes */
|
||||
while (len && !*buff && lzeros < nbytes) {
|
||||
lzeros++;
|
||||
len--;
|
||||
buff++;
|
||||
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct device *dev = ctx->dev;
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct caam_rsa_key *key = &ctx->key;
|
||||
struct rsa_edesc *edesc;
|
||||
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
|
||||
GFP_KERNEL : GFP_ATOMIC;
|
||||
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
unsigned int diff_size = 0;
int lzeros;

lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
if (lzeros < 0)
return ERR_PTR(lzeros);
if (req->src_len > key->n_sz) {
/*
 * strip leading zeros and
 * return the number of zeros to skip
 */
lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
key->n_sz, sg_flags);
if (lzeros < 0)
return ERR_PTR(lzeros);

req->src_len -= lzeros;
req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
lzeros);
req_ctx->fixup_src_len = req->src_len - lzeros;
} else {
/*
 * input src is less than the n key modulus,
 * so there will be zero padding
 */
diff_size = key->n_sz - req->src_len;
req_ctx->fixup_src = req->src;
req_ctx->fixup_src_len = req->src_len;
}
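/*
 * A rough sketch of the intent of the branch above, as I read this hunk
 * (assuming key->n_sz is the modulus size in bytes):
 *
 *   req->src_len > n_sz:  strip at most (src_len - n_sz) leading zeros and
 *                         describe the trimmed input via fixup_src /
 *                         fixup_src_len;
 *   req->src_len <= n_sz: keep the input untouched and remember
 *                         diff_size = n_sz - src_len, which is later fed
 *                         from the shared zero_buffer (ctx->padding_dma)
 *                         as an extra leading S/G entry.
 *
 * Either way the value handed to the accelerator is shaped toward the
 * modulus size.
 */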

src_nents = sg_nents_for_len(req->src, req->src_len);
src_nents = sg_nents_for_len(req_ctx->fixup_src,
req_ctx->fixup_src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);

if (src_nents > 1)
sec4_sg_len = src_nents;
if (!diff_size && src_nents == 1)
sec4_sg_len = 0; /* no need for an input hw s/g table */
else
sec4_sg_len = src_nents + !!diff_size;
sec4_sg_index = sec4_sg_len;
if (dst_nents > 1)
sec4_sg_len += dst_nents;
sec4_sg_len += pad_sg_nents(dst_nents);
else
sec4_sg_len = pad_sg_nents(sec4_sg_len);

sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
|
||||
if (!edesc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
|
||||
if (unlikely(!sgc)) {
|
||||
dev_err(dev, "unable to map source\n");
|
||||
goto src_fail;
|
||||
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
|
||||
}
|
||||
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
|
||||
if (diff_size)
|
||||
dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
|
||||
0);
|
||||
|
||||
if (sec4_sg_index)
|
||||
sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
|
||||
edesc->sec4_sg + !!diff_size, 0);
|
||||
|
||||
sec4_sg_index = 0;
|
||||
if (src_nents > 1) {
|
||||
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
|
||||
sec4_sg_index += src_nents;
|
||||
}
|
||||
if (dst_nents > 1)
|
||||
sg_to_sec4_sg_last(req->dst, dst_nents,
|
||||
sg_to_sec4_sg_last(req->dst, req->dst_len,
|
||||
edesc->sec4_sg + sec4_sg_index, 0);
|
||||
|
||||
/* Save nents for later use in Job Descriptor */
|
||||
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
|
||||
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
|
||||
print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
|
||||
edesc->sec4_sg_bytes, 1);
|
||||
|
||||
return edesc;
|
||||
|
||||
sec4_sg_fail:
|
||||
dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
|
||||
dst_fail:
|
||||
dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
|
||||
dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
|
||||
src_fail:
|
||||
kfree(edesc);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
|
||||
struct rsa_edesc *edesc)
|
||||
{
|
||||
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct caam_rsa_key *key = &ctx->key;
|
||||
struct device *dev = ctx->dev;
|
||||
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
|
||||
pdb->f_dma = edesc->sec4_sg_dma;
|
||||
sec4_sg_index += edesc->src_nents;
|
||||
} else {
|
||||
pdb->f_dma = sg_dma_address(req->src);
|
||||
pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
|
||||
}
|
||||
|
||||
if (edesc->dst_nents > 1) {
|
||||
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req,
|
||||
}
|
||||
|
||||
pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
|
||||
pdb->f_len = req->src_len;
|
||||
pdb->f_len = req_ctx->fixup_src_len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
|
||||
pdb->g_dma = edesc->sec4_sg_dma;
|
||||
sec4_sg_index += edesc->src_nents;
|
||||
} else {
|
||||
pdb->g_dma = sg_dma_address(req->src);
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
|
||||
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
|
||||
}
|
||||
|
||||
if (edesc->dst_nents > 1) {
|
||||
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
|
||||
pdb->g_dma = edesc->sec4_sg_dma;
|
||||
sec4_sg_index += edesc->src_nents;
|
||||
} else {
|
||||
pdb->g_dma = sg_dma_address(req->src);
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
|
||||
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
|
||||
}
|
||||
|
||||
if (edesc->dst_nents > 1) {
|
||||
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
|
||||
pdb->g_dma = edesc->sec4_sg_dma;
|
||||
sec4_sg_index += edesc->src_nents;
|
||||
} else {
|
||||
pdb->g_dma = sg_dma_address(req->src);
|
||||
struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
|
||||
|
||||
pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
|
||||
}
|
||||
|
||||
if (edesc->dst_nents > 1) {
|
||||
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
|
||||
return PTR_ERR(ctx->dev);
|
||||
}
|
||||
|
||||
ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
|
||||
CAAM_RSA_MAX_INPUT_SIZE - 1,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
|
||||
dev_err(ctx->dev, "unable to map padding\n");
|
||||
caam_jr_free(ctx->dev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
|
||||
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
|
||||
struct caam_rsa_key *key = &ctx->key;
|
||||
|
||||
dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
|
||||
1, DMA_TO_DEVICE);
|
||||
caam_rsa_free_key(key);
|
||||
caam_jr_free(ctx->dev);
|
||||
}
|
||||
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = {
|
||||
};
|
||||
|
||||
/* Public Key Cryptography module initialization handler */
|
||||
static int __init caam_pkc_init(void)
|
||||
int caam_pkc_init(struct device *ctrldev)
|
||||
{
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
struct caam_drv_private *priv;
|
||||
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||
u32 pk_inst;
|
||||
int err;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
priv = dev_get_drvdata(ctrldev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
|
||||
/* Determine public key hardware accelerator presence. */
|
||||
if (priv->era < 10)
|
||||
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
|
||||
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void)
|
||||
pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
|
||||
|
||||
/* Do not register algorithms if PKHA is not present. */
|
||||
if (!pk_inst) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
if (!pk_inst)
|
||||
return 0;
|
||||
|
||||
/* allocate zero buffer, used for padding input */
|
||||
zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
|
||||
GFP_KERNEL);
|
||||
if (!zero_buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
err = crypto_register_akcipher(&caam_rsa);
|
||||
if (err)
|
||||
if (err) {
|
||||
kfree(zero_buffer);
|
||||
dev_warn(ctrldev, "%s alg registration failed\n",
|
||||
caam_rsa.base.cra_driver_name);
|
||||
else
|
||||
} else {
|
||||
dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
|
||||
}
|
||||
|
||||
out_put_dev:
|
||||
put_device(ctrldev);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit caam_pkc_exit(void)
|
||||
void caam_pkc_exit(void)
|
||||
{
|
||||
kfree(zero_buffer);
|
||||
crypto_unregister_akcipher(&caam_rsa);
|
||||
}
|
||||
|
||||
module_init(caam_pkc_init);
|
||||
module_exit(caam_pkc_exit);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
|
||||
MODULE_AUTHOR("Freescale Semiconductor");
|
||||
|
@@ -89,18 +89,25 @@ struct caam_rsa_key {
|
||||
* caam_rsa_ctx - per session context.
|
||||
* @key : RSA key in DMA zone
|
||||
* @dev : device structure
|
||||
* @padding_dma : dma address of padding, for adding it to the input
|
||||
*/
|
||||
struct caam_rsa_ctx {
|
||||
struct caam_rsa_key key;
|
||||
struct device *dev;
|
||||
dma_addr_t padding_dma;
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
* caam_rsa_req_ctx - per request context.
|
||||
* @src: input scatterlist (stripped of leading zeros)
|
||||
* @src : input scatterlist (stripped of leading zeros)
|
||||
* @fixup_src : input scatterlist (that might be stripped of leading zeros)
|
||||
* @fixup_src_len : length of the fixup_src input scatterlist
|
||||
*/
|
||||
struct caam_rsa_req_ctx {
|
||||
struct scatterlist src[2];
|
||||
struct scatterlist *fixup_src;
|
||||
unsigned int fixup_src_len;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -3,7 +3,7 @@
|
||||
* caam - Freescale FSL CAAM support for hw_random
|
||||
*
|
||||
* Copyright 2011 Freescale Semiconductor, Inc.
|
||||
* Copyright 2018 NXP
|
||||
* Copyright 2018-2019 NXP
|
||||
*
|
||||
* Based on caamalg.c crypto API driver.
|
||||
*
|
||||
@@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
|
||||
/* Buffer refilled, invalidate cache */
|
||||
dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
bd->buf, RN_BUF_SIZE, 1);
|
||||
}
|
||||
|
||||
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
|
||||
@@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
||||
}
|
||||
|
||||
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -296,47 +294,20 @@ static struct hwrng caam_rng = {
|
||||
.read = caam_read,
|
||||
};
|
||||
|
||||
static void __exit caam_rng_exit(void)
|
||||
void caam_rng_exit(void)
|
||||
{
|
||||
caam_jr_free(rng_ctx->jrdev);
|
||||
hwrng_unregister(&caam_rng);
|
||||
kfree(rng_ctx);
|
||||
}
|
||||
|
||||
static int __init caam_rng_init(void)
|
||||
int caam_rng_init(struct device *ctrldev)
|
||||
{
|
||||
struct device *dev;
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct caam_drv_private *priv;
|
||||
u32 rng_inst;
|
||||
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
|
||||
int err;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = dev_get_drvdata(&pdev->dev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
|
||||
/* Check for an instantiated RNG before registration */
|
||||
if (priv->era < 10)
|
||||
rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
|
||||
@@ -344,16 +315,13 @@ static int __init caam_rng_init(void)
|
||||
else
|
||||
rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
|
||||
|
||||
if (!rng_inst) {
|
||||
err = -ENODEV;
|
||||
goto out_put_dev;
|
||||
}
|
||||
if (!rng_inst)
|
||||
return 0;
|
||||
|
||||
dev = caam_jr_alloc();
|
||||
if (IS_ERR(dev)) {
|
||||
pr_err("Job Ring Device allocation for transform failed\n");
|
||||
err = PTR_ERR(dev);
|
||||
goto out_put_dev;
|
||||
return PTR_ERR(dev);
|
||||
}
|
||||
rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
|
||||
if (!rng_ctx) {
|
||||
@@ -364,7 +332,6 @@ static int __init caam_rng_init(void)
|
||||
if (err)
|
||||
goto free_rng_ctx;
|
||||
|
||||
put_device(&pdev->dev);
|
||||
dev_info(dev, "registering rng-caam\n");
|
||||
return hwrng_register(&caam_rng);
|
||||
|
||||
@@ -372,14 +339,5 @@ free_rng_ctx:
|
||||
kfree(rng_ctx);
|
||||
free_caam_alloc:
|
||||
caam_jr_free(dev);
|
||||
out_put_dev:
|
||||
put_device(&pdev->dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
module_init(caam_rng_init);
|
||||
module_exit(caam_rng_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
|
||||
MODULE_AUTHOR("Freescale Semiconductor - NMG");
|
||||
|
@@ -3,7 +3,7 @@
|
||||
* Controller-level driver, kernel property detection, initialization
|
||||
*
|
||||
* Copyright 2008-2012 Freescale Semiconductor, Inc.
|
||||
* Copyright 2018 NXP
|
||||
* Copyright 2018-2019 NXP
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev)
|
||||
of_platform_depopulate(ctrldev);
|
||||
|
||||
#ifdef CONFIG_CAAM_QI
|
||||
if (ctrlpriv->qidev)
|
||||
caam_qi_shutdown(ctrlpriv->qidev);
|
||||
if (ctrlpriv->qi_init)
|
||||
caam_qi_shutdown(ctrldev);
|
||||
#endif
|
||||
|
||||
/*
|
||||
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev)
|
||||
ctrlpriv->caam_ipg = clk;
|
||||
|
||||
if (!of_machine_is_compatible("fsl,imx7d") &&
|
||||
!of_machine_is_compatible("fsl,imx7s")) {
|
||||
!of_machine_is_compatible("fsl,imx7s") &&
|
||||
!of_machine_is_compatible("fsl,imx7ulp")) {
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "mem");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev)
|
||||
|
||||
if (!of_machine_is_compatible("fsl,imx6ul") &&
|
||||
!of_machine_is_compatible("fsl,imx7d") &&
|
||||
!of_machine_is_compatible("fsl,imx7s")) {
|
||||
!of_machine_is_compatible("fsl,imx7s") &&
|
||||
!of_machine_is_compatible("fsl,imx7ulp")) {
|
||||
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
ctrlpriv->era = caam_get_era(ctrl);
|
||||
|
||||
ret = of_platform_populate(nprop, caam_match, NULL, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "JR platform devices creation error\n");
|
||||
goto iounmap_ctrl;
|
||||
}
|
||||
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
/*
|
||||
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev)
|
||||
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
|
||||
#endif
|
||||
|
||||
ring = 0;
|
||||
for_each_available_child_of_node(nprop, np)
|
||||
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
|
||||
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
|
||||
((__force uint8_t *)ctrl +
|
||||
(ring + JR_BLOCK_NUMBER) *
|
||||
BLOCK_OFFSET
|
||||
);
|
||||
ctrlpriv->total_jobrs++;
|
||||
ring++;
|
||||
}
|
||||
|
||||
/* Check to see if (DPAA 1.x) QI present. If so, enable */
|
||||
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
|
||||
if (ctrlpriv->qi_present && !caam_dpaa2) {
|
||||
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev)
|
||||
#endif
|
||||
}
|
||||
|
||||
ret = of_platform_populate(nprop, caam_match, NULL, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "JR platform devices creation error\n");
|
||||
goto shutdown_qi;
|
||||
}
|
||||
|
||||
ring = 0;
|
||||
for_each_available_child_of_node(nprop, np)
|
||||
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
|
||||
ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
|
||||
((__force uint8_t *)ctrl +
|
||||
(ring + JR_BLOCK_NUMBER) *
|
||||
BLOCK_OFFSET
|
||||
);
|
||||
ctrlpriv->total_jobrs++;
|
||||
ring++;
|
||||
}
|
||||
|
||||
/* If no QI and no rings specified, quit and go home */
|
||||
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
|
||||
dev_err(dev, "no queues configured, terminating\n");
|
||||
@@ -898,6 +901,11 @@ caam_remove:
|
||||
caam_remove(pdev);
|
||||
return ret;
|
||||
|
||||
shutdown_qi:
|
||||
#ifdef CONFIG_CAAM_QI
|
||||
if (ctrlpriv->qi_init)
|
||||
caam_qi_shutdown(dev);
|
||||
#endif
|
||||
iounmap_ctrl:
|
||||
iounmap(ctrl);
|
||||
disable_caam_emi_slow:
|
||||
|
@@ -3,6 +3,7 @@
 * caam descriptor construction helper functions
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019 NXP
 */

#ifndef DESC_CONSTR_H
@@ -37,6 +38,16 @@

extern bool caam_little_end;

/*
 * HW fetches 4 S/G table entries at a time, irrespective of how many entries
 * are in the table. It's SW's responsibility to make sure these accesses
 * do not have side effects.
 */
static inline int pad_sg_nents(int sg_nents)
{
return ALIGN(sg_nents, 4);
}
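/*
 * A quick illustration of the helper above (the values follow directly
 * from ALIGN(x, 4)): pad_sg_nents(1) == 4, pad_sg_nents(4) == 4,
 * pad_sg_nents(5) == 8. Callers size their S/G tables with the padded
 * count so the 4-entry hardware prefetch described above never reads
 * past the allocation.
 */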

static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;

@@ -13,7 +13,7 @@
|
||||
#ifdef DEBUG
|
||||
#include <linux/highmem.h>
|
||||
|
||||
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
|
||||
void caam_dump_sg(const char *prefix_str, int prefix_type,
|
||||
int rowsize, int groupsize, struct scatterlist *sg,
|
||||
size_t tlen, bool ascii)
|
||||
{
|
||||
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
|
||||
|
||||
buf = it_page + it->offset;
|
||||
len = min_t(size_t, tlen, it->length);
|
||||
print_hex_dump(level, prefix_str, prefix_type, rowsize,
|
||||
groupsize, buf, len, ascii);
|
||||
print_hex_dump_debug(prefix_str, prefix_type, rowsize,
|
||||
groupsize, buf, len, ascii);
|
||||
tlen -= len;
|
||||
|
||||
kunmap_atomic(it_page);
|
||||
}
|
||||
}
|
||||
#else
|
||||
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
|
||||
void caam_dump_sg(const char *prefix_str, int prefix_type,
|
||||
int rowsize, int groupsize, struct scatterlist *sg,
|
||||
size_t tlen, bool ascii)
|
||||
{}
|
||||
|
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
|
||||
#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
|
||||
#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
|
||||
|
||||
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
|
||||
void caam_dump_sg(const char *prefix_str, int prefix_type,
|
||||
int rowsize, int groupsize, struct scatterlist *sg,
|
||||
size_t tlen, bool ascii);
|
||||
|
||||
|
@@ -4,7 +4,7 @@
|
||||
* Private/internal definitions between modules
|
||||
*
|
||||
* Copyright 2008-2011 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* Copyright 2019 NXP
|
||||
*/
|
||||
|
||||
#ifndef INTERN_H
|
||||
@@ -63,10 +63,6 @@ struct caam_drv_private_jr {
|
||||
* Driver-private storage for a single CAAM block instance
|
||||
*/
|
||||
struct caam_drv_private {
|
||||
#ifdef CONFIG_CAAM_QI
|
||||
struct device *qidev;
|
||||
#endif
|
||||
|
||||
/* Physical-presence section */
|
||||
struct caam_ctrl __iomem *ctrl; /* controller region */
|
||||
struct caam_deco __iomem *deco; /* DECO/CCB views */
|
||||
@@ -74,12 +70,17 @@ struct caam_drv_private {
|
||||
struct caam_queue_if __iomem *qi; /* QI control region */
|
||||
struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
|
||||
|
||||
struct iommu_domain *domain;
|
||||
|
||||
/*
|
||||
* Detected geometry block. Filled in from device tree if powerpc,
|
||||
* or from register-based version detection code
|
||||
*/
|
||||
u8 total_jobrs; /* Total Job Rings in device */
|
||||
u8 qi_present; /* Nonzero if QI present in device */
|
||||
#ifdef CONFIG_CAAM_QI
|
||||
u8 qi_init; /* Nonzero if QI has been initialized */
|
||||
#endif
|
||||
u8 mc_en; /* Nonzero if MC f/w is active */
|
||||
int secvio_irq; /* Security violation interrupt number */
|
||||
int virt_en; /* Virtualization enabled in CAAM */
|
||||
@@ -107,8 +108,95 @@ struct caam_drv_private {
|
||||
#endif
|
||||
};
|
||||
|
||||
void caam_jr_algapi_init(struct device *dev);
|
||||
void caam_jr_algapi_remove(struct device *dev);
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
|
||||
|
||||
int caam_algapi_init(struct device *dev);
|
||||
void caam_algapi_exit(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline int caam_algapi_init(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_algapi_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
|
||||
|
||||
int caam_algapi_hash_init(struct device *dev);
|
||||
void caam_algapi_hash_exit(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline int caam_algapi_hash_init(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_algapi_hash_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
|
||||
|
||||
int caam_pkc_init(struct device *dev);
|
||||
void caam_pkc_exit(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline int caam_pkc_init(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_pkc_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
|
||||
|
||||
int caam_rng_init(struct device *dev);
|
||||
void caam_rng_exit(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline int caam_rng_init(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_rng_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
|
||||
|
||||
#ifdef CONFIG_CAAM_QI
|
||||
|
||||
int caam_qi_algapi_init(struct device *dev);
|
||||
void caam_qi_algapi_exit(void);
|
||||
|
||||
#else
|
||||
|
||||
static inline int caam_qi_algapi_init(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void caam_qi_algapi_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CAAM_QI */
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
static int caam_debugfs_u64_get(void *data, u64 *val)
|
||||
|
@@ -4,6 +4,7 @@
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019 NXP
 */

#include <linux/of_irq.h>
@@ -23,6 +24,43 @@ struct jr_driver_data {
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void register_algs(struct device *dev)
{
mutex_lock(&algs_lock);

if (++active_devs != 1)
goto algs_unlock;

caam_algapi_init(dev);
caam_algapi_hash_init(dev);
caam_pkc_init(dev);
caam_rng_init(dev);
caam_qi_algapi_init(dev);

algs_unlock:
mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
mutex_lock(&algs_lock);

if (--active_devs != 0)
goto algs_unlock;

caam_qi_algapi_exit();

caam_rng_exit();
caam_pkc_exit();
caam_algapi_hash_exit();
caam_algapi_exit();

algs_unlock:
mutex_unlock(&algs_lock);
}
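/*
 * Sketch of how the two helpers above are meant to interact, as I read
 * them: active_devs is a job-ring refcount taken under algs_lock, so the
 * sub-drivers are registered once (on the first ring) and torn down only
 * when the last ring goes away, e.g.:
 *
 *   register_algs(jr0);   // active_devs 0 -> 1, registers algorithms
 *   register_algs(jr1);   // active_devs 1 -> 2, no-op
 *   unregister_algs();    // active_devs 2 -> 1, no-op
 *   unregister_algs();    // active_devs 1 -> 0, unregisters algorithms
 */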

static int caam_reset_hw_jr(struct device *dev)
{
@@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev)
return -EBUSY;
}

/* Unregister JR-based RNG & crypto algorithms */
unregister_algs();

/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
@@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
|
||||
atomic_set(&jrpriv->tfm_count, 0);
|
||||
|
||||
register_algs(jrdev->parent);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
|
||||
{
|
||||
struct split_key_result *res = context;
|
||||
|
||||
#ifdef DEBUG
|
||||
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
#endif
|
||||
dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
|
||||
|
||||
if (err)
|
||||
caam_jr_strstatus(dev, err);
|
||||
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
|
||||
adata->keylen_pad = split_key_pad_len(adata->algtype &
|
||||
OP_ALG_ALGSEL_MASK);
|
||||
|
||||
#ifdef DEBUG
|
||||
dev_err(jrdev, "split keylen %d split keylen padded %d\n",
|
||||
dev_dbg(jrdev, "split keylen %d split keylen padded %d\n",
|
||||
adata->keylen, adata->keylen_pad);
|
||||
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
|
||||
#endif
|
||||
print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
|
||||
|
||||
if (adata->keylen_pad > max_keylen)
|
||||
return -EINVAL;
|
||||
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
|
||||
append_fifo_store(desc, dma_addr, adata->keylen,
|
||||
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
|
||||
1);
|
||||
|
||||
result.err = 0;
|
||||
init_completion(&result.completion);
|
||||
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
|
||||
/* in progress */
|
||||
wait_for_completion(&result.completion);
|
||||
ret = result.err;
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
|
||||
adata->keylen_pad, 1);
|
||||
#endif
|
||||
|
||||
print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
|
||||
adata->keylen_pad, 1);
|
||||
}
|
||||
|
||||
dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
|
||||
|
@@ -4,7 +4,7 @@
|
||||
* Queue Interface backend functionality
|
||||
*
|
||||
* Copyright 2013-2016 Freescale Semiconductor, Inc.
|
||||
* Copyright 2016-2017 NXP
|
||||
* Copyright 2016-2017, 2019 NXP
|
||||
*/
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
@@ -18,6 +18,7 @@
|
||||
#include "desc_constr.h"
|
||||
|
||||
#define PREHDR_RSLS_SHIFT 31
|
||||
#define PREHDR_ABS BIT(25)
|
||||
|
||||
/*
|
||||
* Use a reasonable backlog of frames (per CPU) as congestion threshold,
|
||||
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu);
|
||||
/*
|
||||
* caam_qi_priv - CAAM QI backend private params
|
||||
* @cgr: QMan congestion group
|
||||
* @qi_pdev: platform device for QI backend
|
||||
*/
|
||||
struct caam_qi_priv {
|
||||
struct qman_cgr cgr;
|
||||
struct platform_device *qi_pdev;
|
||||
};
|
||||
|
||||
static struct caam_qi_priv qipriv ____cacheline_aligned;
|
||||
@@ -95,6 +94,16 @@ static u64 times_congested;
 */
static struct kmem_cache *qi_cache;

static void *caam_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
phys_addr_t phys_addr;

phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

return phys_to_virt(phys_addr);
}
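/*
 * A short note on the helper above: with an IOMMU domain attached, the
 * address carried in the frame descriptor is an IOVA, so it is first
 * translated with iommu_iova_to_phys() before phys_to_virt(); without a
 * domain the address is already physical and is used as-is. The callers
 * below swap their bare phys_to_virt() calls for this helper.
 */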

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
struct caam_drv_private *priv = dev_get_drvdata(qidev);

fd = &msg->ern.fd;

@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
return;
}

drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (!drv_req) {
dev_err(qidev,
"Can't find original request for CAAM response\n");
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
|
||||
*/
|
||||
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
|
||||
num_words);
|
||||
drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
|
||||
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
|
||||
dma_sync_single_for_device(qidev, drv_ctx->context_a,
|
||||
sizeof(drv_ctx->sh_desc) +
|
||||
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
|
||||
*/
|
||||
drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
|
||||
num_words);
|
||||
drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
|
||||
memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
|
||||
size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
|
||||
hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
|
||||
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
|
||||
void caam_qi_shutdown(struct device *qidev)
|
||||
{
|
||||
int i;
|
||||
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
|
||||
struct caam_qi_priv *priv = &qipriv;
|
||||
const cpumask_t *cpus = qman_affine_cpus();
|
||||
|
||||
for_each_cpu(i, cpus) {
|
||||
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev)
|
||||
qman_release_cgrid(priv->cgr.cgrid);
|
||||
|
||||
kmem_cache_destroy(qi_cache);
|
||||
|
||||
platform_device_unregister(priv->qi_pdev);
|
||||
}
|
||||
|
||||
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
|
||||
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
|
||||
struct caam_drv_req *drv_req;
|
||||
const struct qm_fd *fd;
|
||||
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
|
||||
struct caam_drv_private *priv = dev_get_drvdata(qidev);
|
||||
u32 status;
|
||||
|
||||
if (caam_qi_napi_schedule(p, caam_napi))
|
||||
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
|
||||
return qman_cb_dqrr_consume;
|
||||
}
|
||||
|
||||
drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
|
||||
drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
|
||||
if (unlikely(!drv_req)) {
|
||||
dev_err(qidev,
|
||||
"Can't find original request for caam response\n");
|
||||
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void)
|
||||
int caam_qi_init(struct platform_device *caam_pdev)
|
||||
{
|
||||
int err, i;
|
||||
struct platform_device *qi_pdev;
|
||||
struct device *ctrldev = &caam_pdev->dev, *qidev;
|
||||
struct caam_drv_private *ctrlpriv;
|
||||
const cpumask_t *cpus = qman_affine_cpus();
|
||||
static struct platform_device_info qi_pdev_info = {
|
||||
.name = "caam_qi",
|
||||
.id = PLATFORM_DEVID_NONE
|
||||
};
|
||||
|
||||
qi_pdev_info.parent = ctrldev;
|
||||
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
|
||||
qi_pdev = platform_device_register_full(&qi_pdev_info);
|
||||
if (IS_ERR(qi_pdev))
|
||||
return PTR_ERR(qi_pdev);
|
||||
set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
|
||||
|
||||
ctrlpriv = dev_get_drvdata(ctrldev);
|
||||
qidev = &qi_pdev->dev;
|
||||
|
||||
qipriv.qi_pdev = qi_pdev;
|
||||
dev_set_drvdata(qidev, &qipriv);
|
||||
qidev = ctrldev;
|
||||
|
||||
/* Initialize the congestion detection */
|
||||
err = init_cgr(qidev);
|
||||
if (err) {
|
||||
dev_err(qidev, "CGR initialization failed: %d\n", err);
|
||||
platform_device_unregister(qi_pdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
|
||||
if (err) {
|
||||
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
|
||||
free_rsp_fqs();
|
||||
platform_device_unregister(qi_pdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
|
||||
napi_enable(irqtask);
|
||||
}
|
||||
|
||||
/* Hook up QI device to parent controlling caam device */
|
||||
ctrlpriv->qidev = qidev;
|
||||
|
||||
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
|
||||
SLAB_CACHE_DMA, NULL);
|
||||
if (!qi_cache) {
|
||||
dev_err(qidev, "Can't allocate CAAM cache\n");
|
||||
free_rsp_fqs();
|
||||
platform_device_unregister(qi_pdev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
|
||||
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
|
||||
×_congested, &caam_fops_u64_ro);
|
||||
#endif
|
||||
|
||||
ctrlpriv->qi_init = 1;
|
||||
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
|
||||
return 0;
|
||||
}
|
||||
|
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
|
||||
* but does not have final bit; instead, returns last entry
|
||||
*/
|
||||
static inline struct qm_sg_entry *
|
||||
sg_to_qm_sg(struct scatterlist *sg, int sg_count,
|
||||
sg_to_qm_sg(struct scatterlist *sg, int len,
|
||||
struct qm_sg_entry *qm_sg_ptr, u16 offset)
|
||||
{
|
||||
while (sg_count && sg) {
|
||||
dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
|
||||
sg_dma_len(sg), offset);
|
||||
int ent_len;
|
||||
|
||||
while (len) {
|
||||
ent_len = min_t(int, sg_dma_len(sg), len);
|
||||
|
||||
dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
|
||||
offset);
|
||||
qm_sg_ptr++;
|
||||
sg = sg_next(sg);
|
||||
sg_count--;
|
||||
len -= ent_len;
|
||||
}
|
||||
return qm_sg_ptr - 1;
|
||||
}
|
||||
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
|
||||
* convert scatterlist to h/w link table format
|
||||
* scatterlist must have been previously dma mapped
|
||||
*/
|
||||
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
|
||||
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
|
||||
struct qm_sg_entry *qm_sg_ptr, u16 offset)
|
||||
{
|
||||
qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
|
||||
qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
|
||||
qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
|
||||
}
|
||||
|
||||
|
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
|
||||
* but does not have final bit; instead, returns last entry
|
||||
*/
|
||||
static inline struct dpaa2_sg_entry *
|
||||
sg_to_qm_sg(struct scatterlist *sg, int sg_count,
|
||||
sg_to_qm_sg(struct scatterlist *sg, int len,
|
||||
struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
|
||||
{
|
||||
while (sg_count && sg) {
|
||||
dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
|
||||
sg_dma_len(sg), offset);
|
||||
int ent_len;
|
||||
|
||||
while (len) {
|
||||
ent_len = min_t(int, sg_dma_len(sg), len);
|
||||
|
||||
dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
|
||||
offset);
|
||||
qm_sg_ptr++;
|
||||
sg = sg_next(sg);
|
||||
sg_count--;
|
||||
len -= ent_len;
|
||||
}
|
||||
return qm_sg_ptr - 1;
|
||||
}
|
||||
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
|
||||
* convert scatterlist to h/w link table format
|
||||
* scatterlist must have been previously dma mapped
|
||||
*/
|
||||
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
|
||||
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
|
||||
struct dpaa2_sg_entry *qm_sg_ptr,
|
||||
u16 offset)
|
||||
{
|
||||
qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
|
||||
qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
|
||||
dpaa2_sg_set_final(qm_sg_ptr, true);
|
||||
}
|
||||
|
||||
|
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
SEC4_SG_OFFSET_MASK);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
sizeof(struct sec4_sg_entry), 1);
#endif

print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4,
sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1);
}

/*
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 * but does not have final bit; instead, returns last entry
 */
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
sg_to_sec4_sg(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
while (sg_count) {
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
sg_dma_len(sg), offset);
int ent_len;

while (len) {
ent_len = min_t(int, sg_dma_len(sg), len);

dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
offset);
sec4_sg_ptr++;
sg = sg_next(sg);
sg_count--;
len -= ent_len;
}
return sec4_sg_ptr - 1;
}
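/*
 * Rough reading of the change above: the table is now built from the
 * number of bytes still needed rather than from an entry count, and the
 * last entry is trimmed with min_t(). For a DMA-mapped list of two 4096
 * byte segments and len == 6000, the resulting entries would be 4096 and
 * 1904 bytes instead of 4096 and 4096.
 */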
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
struct sec4_sg_entry *sec4_sg_ptr,
u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
sg_to_sec4_set_last(sec4_sg_ptr);
}
|
||||
|
@@ -7,7 +7,6 @@
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/crypto_wq.h>
|
||||
#include <crypto/des.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <linux/crypto.h>
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __NITROX_DEBUGFS_H
|
||||
#define __NITROX_DEBUGFS_H
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __NITROX_MBX_H
|
||||
#define __NITROX_MBX_H
|
||||
|
||||
|
@@ -2,7 +2,7 @@
|
||||
/*
|
||||
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
|
||||
*
|
||||
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
|
||||
* Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Author: Tom Lendacky <thomas.lendacky@amd.com>
|
||||
*/
|
||||
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
|
||||
return -EINVAL;
|
||||
|
||||
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
|
||||
(ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
|
||||
(ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
|
||||
(ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
|
||||
(req->nbytes & (AES_BLOCK_SIZE - 1)))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = {
|
||||
.version = CCP_VERSION(3, 0),
|
||||
.name = "cfb(aes)",
|
||||
.driver_name = "cfb-aes-ccp",
|
||||
.blocksize = AES_BLOCK_SIZE,
|
||||
.blocksize = 1,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.alg_defaults = &ccp_aes_defaults,
|
||||
},
|
||||
|
@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
|
||||
};
|
||||
|
||||
/* Human-readable error strings */
|
||||
#define CCP_MAX_ERROR_CODE 64
|
||||
static char *ccp_error_codes[] = {
|
||||
"",
|
||||
"ERR 01: ILLEGAL_ENGINE",
|
||||
"ERR 02: ILLEGAL_KEY_ID",
|
||||
"ERR 03: ILLEGAL_FUNCTION_TYPE",
|
||||
"ERR 04: ILLEGAL_FUNCTION_MODE",
|
||||
"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
|
||||
"ERR 06: ILLEGAL_FUNCTION_SIZE",
|
||||
"ERR 07: Zlib_MISSING_INIT_EOM",
|
||||
"ERR 08: ILLEGAL_FUNCTION_RSVD",
|
||||
"ERR 09: ILLEGAL_BUFFER_LENGTH",
|
||||
"ERR 10: VLSB_FAULT",
|
||||
"ERR 11: ILLEGAL_MEM_ADDR",
|
||||
"ERR 12: ILLEGAL_MEM_SEL",
|
||||
"ERR 13: ILLEGAL_CONTEXT_ID",
|
||||
"ERR 14: ILLEGAL_KEY_ADDR",
|
||||
"ERR 15: 0xF Reserved",
|
||||
"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
|
||||
"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
|
||||
"ERR 18: CMD_TIMEOUT",
|
||||
"ERR 19: IDMA0_AXI_SLVERR",
|
||||
"ERR 20: IDMA0_AXI_DECERR",
|
||||
"ERR 21: 0x15 Reserved",
|
||||
"ERR 22: IDMA1_AXI_SLAVE_FAULT",
|
||||
"ERR 23: IDMA1_AIXI_DECERR",
|
||||
"ERR 24: 0x18 Reserved",
|
||||
"ERR 25: ZLIBVHB_AXI_SLVERR",
|
||||
"ERR 26: ZLIBVHB_AXI_DECERR",
|
||||
"ERR 27: 0x1B Reserved",
|
||||
"ERR 27: ZLIB_UNEXPECTED_EOM",
|
||||
"ERR 27: ZLIB_EXTRA_DATA",
|
||||
"ERR 30: ZLIB_BTYPE",
|
||||
"ERR 31: ZLIB_UNDEFINED_SYMBOL",
|
||||
"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
|
||||
"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
|
||||
"ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
|
||||
"ERR 35: ZLIB_UNCOMPRESSED_LEN",
|
||||
"ERR 36: ZLIB_LIMIT_REACHED",
|
||||
"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
|
||||
"ERR 38: ODMA0_AXI_SLVERR",
|
||||
"ERR 39: ODMA0_AXI_DECERR",
|
||||
"ERR 40: 0x28 Reserved",
|
||||
"ERR 41: ODMA1_AXI_SLVERR",
|
||||
"ERR 42: ODMA1_AXI_DECERR",
|
||||
"ERR 43: LSB_PARITY_ERR",
|
||||
"ILLEGAL_ENGINE",
|
||||
"ILLEGAL_KEY_ID",
|
||||
"ILLEGAL_FUNCTION_TYPE",
|
||||
"ILLEGAL_FUNCTION_MODE",
|
||||
"ILLEGAL_FUNCTION_ENCRYPT",
|
||||
"ILLEGAL_FUNCTION_SIZE",
|
||||
"Zlib_MISSING_INIT_EOM",
|
||||
"ILLEGAL_FUNCTION_RSVD",
|
||||
"ILLEGAL_BUFFER_LENGTH",
|
||||
"VLSB_FAULT",
|
||||
"ILLEGAL_MEM_ADDR",
|
||||
"ILLEGAL_MEM_SEL",
|
||||
"ILLEGAL_CONTEXT_ID",
|
||||
"ILLEGAL_KEY_ADDR",
|
||||
"0xF Reserved",
|
||||
"Zlib_ILLEGAL_MULTI_QUEUE",
|
||||
"Zlib_ILLEGAL_JOBID_CHANGE",
|
||||
"CMD_TIMEOUT",
|
||||
"IDMA0_AXI_SLVERR",
|
||||
"IDMA0_AXI_DECERR",
|
||||
"0x15 Reserved",
|
||||
"IDMA1_AXI_SLAVE_FAULT",
|
||||
"IDMA1_AIXI_DECERR",
|
||||
"0x18 Reserved",
|
||||
"ZLIBVHB_AXI_SLVERR",
|
||||
"ZLIBVHB_AXI_DECERR",
|
||||
"0x1B Reserved",
|
||||
"ZLIB_UNEXPECTED_EOM",
|
||||
"ZLIB_EXTRA_DATA",
|
||||
"ZLIB_BTYPE",
|
||||
"ZLIB_UNDEFINED_SYMBOL",
|
||||
"ZLIB_UNDEFINED_DISTANCE_S",
|
||||
"ZLIB_CODE_LENGTH_SYMBOL",
|
||||
"ZLIB _VHB_ILLEGAL_FETCH",
|
||||
"ZLIB_UNCOMPRESSED_LEN",
|
||||
"ZLIB_LIMIT_REACHED",
|
||||
"ZLIB_CHECKSUM_MISMATCH0",
|
||||
"ODMA0_AXI_SLVERR",
|
||||
"ODMA0_AXI_DECERR",
|
||||
"0x28 Reserved",
|
||||
"ODMA1_AXI_SLVERR",
|
||||
"ODMA1_AXI_DECERR",
|
||||
};
|
||||
|
||||
void ccp_log_error(struct ccp_device *d, int e)
void ccp_log_error(struct ccp_device *d, unsigned int e)
{
dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
return;

if (e < ARRAY_SIZE(ccp_error_codes))
dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
else
dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
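/*
 * Net effect of the rewrite above, as far as I can tell: a code at or
 * beyond CCP_MAX_ERROR_CODE only triggers the WARN_ON and returns, a code
 * inside the limit but without a table entry prints "Unknown Error", and
 * the table strings drop their old "ERR NN:" prefixes because the numeric
 * code is now printed by the format string itself.
 */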
|
||||
|
||||
/* List of CCPs, CCP count, read-write access lock, and access functions
|
||||
|
@@ -629,7 +629,7 @@ struct ccp5_desc {
|
||||
void ccp_add_device(struct ccp_device *ccp);
|
||||
void ccp_del_device(struct ccp_device *ccp);
|
||||
|
||||
extern void ccp_log_error(struct ccp_device *, int);
|
||||
extern void ccp_log_error(struct ccp_device *, unsigned int);
|
||||
|
||||
struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
|
||||
bool ccp_queues_suspended(struct ccp_device *ccp);
|
||||
|
@@ -2,7 +2,7 @@
|
||||
/*
|
||||
* AMD Cryptographic Coprocessor (CCP) driver
|
||||
*
|
||||
* Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
|
||||
* Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Author: Tom Lendacky <thomas.lendacky@amd.com>
|
||||
* Author: Gary R Hook <gary.hook@amd.com>
|
||||
@@ -890,8 +890,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
return -EINVAL;
|
||||
|
||||
if (((aes->mode == CCP_AES_MODE_ECB) ||
|
||||
(aes->mode == CCP_AES_MODE_CBC) ||
|
||||
(aes->mode == CCP_AES_MODE_CFB)) &&
|
||||
(aes->mode == CCP_AES_MODE_CBC)) &&
|
||||
(aes->src_len & (AES_BLOCK_SIZE - 1)))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -1264,6 +1263,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
int ret;
|
||||
|
||||
/* Error checks */
|
||||
if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (!cmd_q->ccp->vdata->perform->des3)
|
||||
return -EINVAL;
|
||||
|
||||
@@ -1346,8 +1348,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
* passthru option to convert from big endian to little endian.
|
||||
*/
|
||||
if (des3->mode != CCP_DES3_MODE_ECB) {
|
||||
u32 load_mode;
|
||||
|
||||
op.sb_ctx = cmd_q->sb_ctx;
|
||||
|
||||
ret = ccp_init_dm_workarea(&ctx, cmd_q,
|
||||
@@ -1363,12 +1363,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
if (ret)
|
||||
goto e_ctx;
|
||||
|
||||
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
|
||||
load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
|
||||
else
|
||||
load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
|
||||
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
|
||||
load_mode);
|
||||
CCP_PASSTHRU_BYTESWAP_256BIT);
|
||||
if (ret) {
|
||||
cmd->engine_error = cmd_q->cmd_error;
|
||||
goto e_ctx;
|
||||
@@ -1430,10 +1426,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
}
|
||||
|
||||
/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
|
||||
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
|
||||
dm_offset = CCP_SB_BYTES - des3->iv_len;
|
||||
else
|
||||
dm_offset = 0;
|
||||
ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
|
||||
DES3_EDE_BLOCK_SIZE);
|
||||
}
|
||||
|
@@ -48,6 +48,7 @@ struct cc_hw_data {
|
||||
};
|
||||
|
||||
#define CC_NUM_IDRS 4
|
||||
#define CC_HW_RESET_LOOP_COUNT 10
|
||||
|
||||
/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
|
||||
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
|
||||
@@ -133,6 +134,9 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
|
||||
u32 imr;
|
||||
|
||||
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
|
||||
/* if the driver is suspended, return; probably a shared interrupt */
|
||||
if (cc_pm_is_dev_suspended(dev))
|
||||
return IRQ_NONE;
|
||||
|
||||
/* read the interrupt status */
|
||||
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
|
||||
@@ -188,6 +192,31 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
|
||||
{
|
||||
unsigned int val;
|
||||
unsigned int i;
|
||||
|
||||
/* 712/710/63 has no reset completion indication, always return true */
|
||||
if (drvdata->hw_rev <= CC_HW_REV_712)
|
||||
return true;
|
||||
|
||||
for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
|
||||
/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
|
||||
* completed and device is fully functional
|
||||
*/
|
||||
val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
|
||||
if (val & CC_NVM_IS_IDLE_MASK) {
|
||||
/* hw indicate reset completed */
|
||||
return true;
|
||||
}
|
||||
/* allow scheduling other process on the processor */
|
||||
schedule();
|
||||
}
|
||||
/* reset not completed */
|
||||
return false;
|
||||
}
|
||||
|
||||
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
|
||||
{
|
||||
unsigned int val, cache_params;
|
||||
@@ -315,15 +344,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
return new_drvdata->irq;
|
||||
}
|
||||
|
||||
rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
|
||||
IRQF_SHARED, "ccree", new_drvdata);
|
||||
if (rc) {
|
||||
dev_err(dev, "Could not register to interrupt %d\n",
|
||||
new_drvdata->irq);
|
||||
return rc;
|
||||
}
|
||||
dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
|
||||
|
||||
init_completion(&new_drvdata->hw_queue_avail);
|
||||
|
||||
if (!plat_dev->dev.dma_mask)
|
||||
@@ -352,6 +372,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
|
||||
new_drvdata->sec_disabled = cc_sec_disable;
|
||||
|
||||
/* wait for Crytpcell reset completion */
|
||||
if (!cc_wait_for_reset_completion(new_drvdata)) {
|
||||
dev_err(dev, "Cryptocell reset not completed");
|
||||
}
|
||||
|
||||
if (hw_rev->rev <= CC_HW_REV_712) {
|
||||
/* Verify correct mapping */
|
||||
val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
|
||||
@@ -383,6 +408,24 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
}
|
||||
sig_cidr = val;
|
||||
|
||||
/* Check HW engine configuration */
|
||||
val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
|
||||
switch (val) {
|
||||
case CC_PINS_FULL:
|
||||
/* This is fine */
|
||||
break;
|
||||
case CC_PINS_SLIM:
|
||||
if (new_drvdata->std_bodies & CC_STD_NIST) {
|
||||
dev_warn(dev, "703 mode forced due to HW configuration.\n");
|
||||
new_drvdata->std_bodies = CC_STD_OSCCA;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Unsupported engines configration.\n");
|
||||
rc = -EINVAL;
|
||||
goto post_clk_err;
|
||||
}
|
||||
|
||||
/* Check security disable state */
|
||||
val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
|
||||
val &= CC_SECURITY_DISABLED_MASK;
|
||||
@@ -401,6 +444,15 @@ static int init_cc_resources(struct platform_device *plat_dev)
|
||||
/* Display HW versions */
|
||||
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
|
||||
hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
|
||||
/* register the driver isr function */
|
||||
rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
|
||||
IRQF_SHARED, "ccree", new_drvdata);
|
||||
if (rc) {
|
||||
dev_err(dev, "Could not register to interrupt %d\n",
|
||||
new_drvdata->irq);
|
||||
goto post_clk_err;
|
||||
}
|
||||
dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
|
||||
|
||||
rc = init_cc_regs(new_drvdata, true);
|
||||
if (rc) {
|
||||
|
@@ -53,6 +53,9 @@ enum cc_std_body {
|
||||
|
||||
#define CC_COHERENT_CACHE_PARAMS 0xEEE
|
||||
|
||||
#define CC_PINS_FULL 0x0
|
||||
#define CC_PINS_SLIM 0x9F
|
||||
|
||||
/* Maximum DMA mask supported by IP */
|
||||
#define DMA_BIT_MASK_LEN 48
|
||||
|
||||
@@ -67,6 +70,8 @@ enum cc_std_body {
|
||||
|
||||
#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
|
||||
|
||||
#define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)
|
||||
|
||||
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
|
||||
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
|
||||
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
|
||||
@@ -216,6 +221,7 @@ static inline void dump_byte_array(const char *name, const u8 *the_array,
|
||||
__dump_byte_array(name, the_array, size);
|
||||
}
|
||||
|
||||
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata);
|
||||
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
|
||||
void fini_cc_regs(struct cc_drvdata *drvdata);
|
||||
int cc_clk_on(struct cc_drvdata *drvdata);
|
||||
|
@@ -114,6 +114,9 @@
|
||||
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
|
||||
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
|
||||
#define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL
|
||||
#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL
|
||||
#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL
|
||||
#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
|
||||
#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
|
||||
#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
|
||||
@@ -203,6 +206,23 @@
|
||||
#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
|
||||
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
|
||||
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL
|
||||
#define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL
|
||||
// --------------------------------------
|
||||
// BLOCK: ID_REGISTERS
|
||||
// --------------------------------------
|
||||
|
@@ -49,6 +49,11 @@ int cc_pm_resume(struct device *dev)
 		dev_err(dev, "failed getting clock back on. We're toast.\n");
 		return rc;
 	}
+	/* wait for Crytpcell reset completion */
+	if (!cc_wait_for_reset_completion(drvdata)) {
+		dev_err(dev, "Cryptocell reset not completed");
+		return -EBUSY;
+	}
 
 	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
 	rc = init_cc_regs(drvdata, false);
@@ -101,6 +106,12 @@ int cc_pm_put_suspend(struct device *dev)
 	return rc;
 }
 
+bool cc_pm_is_dev_suspended(struct device *dev)
+{
+	/* check device state using runtime api */
+	return pm_runtime_suspended(dev);
+}
+
 int cc_pm_init(struct cc_drvdata *drvdata)
 {
 	struct device *dev = drvdata_to_dev(drvdata);
|
@@ -22,6 +22,7 @@ int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
 int cc_pm_get(struct device *dev);
 int cc_pm_put_suspend(struct device *dev);
+bool cc_pm_is_dev_suspended(struct device *dev);
 
 #else
 
@@ -54,6 +55,12 @@ static inline int cc_pm_put_suspend(struct device *dev)
 	return 0;
 }
 
+static inline bool cc_pm_is_dev_suspended(struct device *dev)
+{
+	/* if PM not supported device is never suspend */
+	return false;
+}
+
 #endif
 
 #endif /*__POWER_MGR_H__*/
|
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2016-2017 Hisilicon Limited. */
 
 #ifndef _SEC_DRV_H_
|
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 
 	/* Processing Engine configuration */
 
+	/* Token & context configuration */
+	val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+	      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
+	      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
+	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
 	/* H/W capabilities selection */
 	val = EIP197_FUNCTION_RSVD;
 	val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 	if (rdesc->result_data.error_code & 0x407f) {
 		/* Fatal error (bits 0-7, 14) */
 		dev_err(priv->dev,
-			"cipher: result: result descriptor error (%d)\n",
+			"cipher: result: result descriptor error (0x%x)\n",
 			rdesc->result_data.error_code);
-		return -EIO;
+		return -EINVAL;
 	} else if (rdesc->result_data.error_code == BIT(9)) {
 		/* Authentication failed */
 		return -EBADMSG;
@@ -720,11 +726,10 @@ handle_results:
 	}
 
 acknowledge:
-	if (i) {
+	if (i)
 		writel(EIP197_xDR_PROC_xD_PKT(i) |
 		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
-	}
 
 	/* If the number of requests overflowed the counter, try to proceed more
 	 * requests.
|
@@ -118,6 +118,7 @@
|
||||
#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
|
||||
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
|
||||
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
|
||||
#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
|
||||
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
|
||||
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
|
||||
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
|
||||
@@ -249,6 +250,11 @@
|
||||
#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0)
|
||||
#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1)
|
||||
|
||||
/* EIP197_PE_EIP96_TOKEN_CTRL */
|
||||
#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
|
||||
#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19)
|
||||
#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20)
|
||||
|
||||
/* EIP197_PE_EIP96_FUNCTION_EN */
|
||||
#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23))
|
||||
#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
|
||||
@@ -333,6 +339,7 @@ struct safexcel_context_record {
|
||||
#define CONTEXT_CONTROL_IV3 BIT(8)
|
||||
#define CONTEXT_CONTROL_DIGEST_CNT BIT(9)
|
||||
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
|
||||
#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
|
||||
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
|
||||
|
||||
/* The hash counter given to the engine in the context has a granularity of
|
||||
@@ -425,6 +432,10 @@ struct safexcel_token {
|
||||
|
||||
#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
|
||||
|
||||
#define EIP197_TOKEN_CTX_OFFSET(x) (x)
|
||||
#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11)
|
||||
#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12)
|
||||
|
||||
#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
|
||||
#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
|
||||
#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
|
||||
@@ -432,6 +443,7 @@ struct safexcel_token {
|
||||
#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
|
||||
#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
|
||||
#define EIP197_TOKEN_OPCODE_VERIFY 0xd
|
||||
#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
|
||||
#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
|
||||
|
||||
static inline void eip197_noop_token(struct safexcel_token *token)
|
||||
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
|
||||
|
||||
/* Instructions */
|
||||
#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
|
||||
#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
|
||||
#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
|
||||
#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
|
||||
#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
|
||||
#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7)
|
||||
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
|
||||
|
||||
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
|
||||
#define EIP197_OPTION_64BIT_CTX BIT(1)
|
||||
#define EIP197_OPTION_RC_AUTO (0x2 << 3)
|
||||
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
|
||||
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
|
||||
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
|
||||
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
|
||||
u32 digest;
|
||||
|
||||
u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
|
||||
u8 cache[SHA512_BLOCK_SIZE];
|
||||
u8 cache[SHA512_BLOCK_SIZE << 1];
|
||||
};
|
||||
|
||||
/*
|
||||
|
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
|
||||
|
||||
struct safexcel_cipher_req {
|
||||
enum safexcel_cipher_direction direction;
|
||||
/* Number of result descriptors associated to the request */
|
||||
unsigned int rdescs;
|
||||
bool needs_inv;
|
||||
};
|
||||
|
||||
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
|
||||
u32 length)
|
||||
{
|
||||
struct safexcel_token *token;
|
||||
unsigned offset = 0;
|
||||
u32 offset = 0, block_sz = 0;
|
||||
|
||||
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
|
||||
switch (ctx->alg) {
|
||||
case SAFEXCEL_DES:
|
||||
offset = DES_BLOCK_SIZE / sizeof(u32);
|
||||
memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
|
||||
block_sz = DES_BLOCK_SIZE;
|
||||
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
|
||||
break;
|
||||
case SAFEXCEL_3DES:
|
||||
offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
|
||||
memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
|
||||
block_sz = DES3_EDE_BLOCK_SIZE;
|
||||
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
|
||||
break;
|
||||
|
||||
case SAFEXCEL_AES:
|
||||
offset = AES_BLOCK_SIZE / sizeof(u32);
|
||||
memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
|
||||
block_sz = AES_BLOCK_SIZE;
|
||||
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
|
||||
break;
|
||||
}
|
||||
|
||||
offset = block_sz / sizeof(u32);
|
||||
memcpy(cdesc->control_data.token, iv, block_sz);
|
||||
}
|
||||
|
||||
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
|
||||
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
|
||||
token[0].instructions = EIP197_TOKEN_INS_LAST |
|
||||
EIP197_TOKEN_INS_TYPE_CRYTO |
|
||||
EIP197_TOKEN_INS_TYPE_OUTPUT;
|
||||
|
||||
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
|
||||
u32 last = (EIP197_MAX_TOKENS - 1) - offset;
|
||||
|
||||
token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
|
||||
token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
|
||||
EIP197_TOKEN_EXEC_IF_SUCCESSFUL|
|
||||
EIP197_TOKEN_CTX_OFFSET(0x2);
|
||||
token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
|
||||
EIP197_TOKEN_STAT_LAST_PACKET;
|
||||
token[last].instructions =
|
||||
EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
|
||||
EIP197_TOKEN_INS_ORIGIN_IV0;
|
||||
|
||||
/* Store the updated IV values back in the internal context
|
||||
* registers.
|
||||
*/
|
||||
cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
|
||||
}
|
||||
}
|
||||
|
||||
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
|
||||
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
|
||||
|
||||
*ret = 0;
|
||||
|
||||
do {
|
||||
if (unlikely(!sreq->rdescs))
|
||||
return 0;
|
||||
|
||||
while (sreq->rdescs--) {
|
||||
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
|
||||
if (IS_ERR(rdesc)) {
|
||||
dev_err(priv->dev,
|
||||
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
|
||||
*ret = safexcel_rdesc_check_errors(priv, rdesc);
|
||||
|
||||
ndesc++;
|
||||
} while (!rdesc->last_seg);
|
||||
}
|
||||
|
||||
safexcel_complete(priv, ring);
|
||||
|
||||
if (src == dst) {
|
||||
dma_unmap_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, cryptlen),
|
||||
DMA_BIDIRECTIONAL);
|
||||
dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
|
||||
} else {
|
||||
dma_unmap_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, cryptlen),
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_sg(priv->dev, dst,
|
||||
sg_nents_for_len(dst, cryptlen),
|
||||
DMA_FROM_DEVICE);
|
||||
dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
|
||||
dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
*should_complete = true;
|
||||
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
|
||||
int i, ret = 0;
|
||||
|
||||
if (src == dst) {
|
||||
nr_src = dma_map_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, totlen),
|
||||
nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
|
||||
DMA_BIDIRECTIONAL);
|
||||
nr_dst = nr_src;
|
||||
if (!nr_src)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
nr_src = dma_map_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, totlen),
|
||||
nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
|
||||
DMA_TO_DEVICE);
|
||||
if (!nr_src)
|
||||
return -EINVAL;
|
||||
|
||||
nr_dst = dma_map_sg(priv->dev, dst,
|
||||
sg_nents_for_len(dst, totlen),
|
||||
nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
|
||||
DMA_FROM_DEVICE);
|
||||
if (!nr_dst) {
|
||||
dma_unmap_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, totlen),
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
|
||||
|
||||
/* result descriptors */
|
||||
for_each_sg(dst, sg, nr_dst, i) {
|
||||
bool first = !i, last = (i == nr_dst - 1);
|
||||
bool first = !i, last = sg_is_last(sg);
|
||||
u32 len = sg_dma_len(sg);
|
||||
|
||||
rdesc = safexcel_add_rdesc(priv, ring, first, last,
|
||||
@@ -483,16 +495,10 @@ cdesc_rollback:
|
||||
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
|
||||
|
||||
if (src == dst) {
|
||||
dma_unmap_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, totlen),
|
||||
DMA_BIDIRECTIONAL);
|
||||
dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
|
||||
} else {
|
||||
dma_unmap_sg(priv->dev, src,
|
||||
sg_nents_for_len(src, totlen),
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_sg(priv->dev, dst,
|
||||
sg_nents_for_len(dst, totlen),
|
||||
DMA_FROM_DEVICE);
|
||||
dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
|
||||
dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -501,6 +507,7 @@ cdesc_rollback:
|
||||
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
|
||||
int ring,
|
||||
struct crypto_async_request *base,
|
||||
struct safexcel_cipher_req *sreq,
|
||||
bool *should_complete, int *ret)
|
||||
{
|
||||
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
|
||||
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
|
||||
|
||||
*ret = 0;
|
||||
|
||||
do {
|
||||
if (unlikely(!sreq->rdescs))
|
||||
return 0;
|
||||
|
||||
while (sreq->rdescs--) {
|
||||
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
|
||||
if (IS_ERR(rdesc)) {
|
||||
dev_err(priv->dev,
|
||||
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
|
||||
*ret = safexcel_rdesc_check_errors(priv, rdesc);
|
||||
|
||||
ndesc++;
|
||||
} while (!rdesc->last_seg);
|
||||
}
|
||||
|
||||
safexcel_complete(priv, ring);
|
||||
|
||||
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
|
||||
{
|
||||
struct skcipher_request *req = skcipher_request_cast(async);
|
||||
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
|
||||
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
|
||||
int err;
|
||||
|
||||
if (sreq->needs_inv) {
|
||||
sreq->needs_inv = false;
|
||||
err = safexcel_handle_inv_result(priv, ring, async,
|
||||
err = safexcel_handle_inv_result(priv, ring, async, sreq,
|
||||
should_complete, ret);
|
||||
} else {
|
||||
err = safexcel_handle_req_result(priv, ring, async, req->src,
|
||||
req->dst, req->cryptlen, sreq,
|
||||
should_complete, ret);
|
||||
|
||||
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
|
||||
u32 block_sz = 0;
|
||||
|
||||
switch (ctx->alg) {
|
||||
case SAFEXCEL_DES:
|
||||
block_sz = DES_BLOCK_SIZE;
|
||||
break;
|
||||
case SAFEXCEL_3DES:
|
||||
block_sz = DES3_EDE_BLOCK_SIZE;
|
||||
break;
|
||||
case SAFEXCEL_AES:
|
||||
block_sz = AES_BLOCK_SIZE;
|
||||
break;
|
||||
}
|
||||
|
||||
memcpy(req->iv, ctx->base.ctxr->data, block_sz);
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
|
||||
|
||||
if (sreq->needs_inv) {
|
||||
sreq->needs_inv = false;
|
||||
err = safexcel_handle_inv_result(priv, ring, async,
|
||||
err = safexcel_handle_inv_result(priv, ring, async, sreq,
|
||||
should_complete, ret);
|
||||
} else {
|
||||
err = safexcel_handle_req_result(priv, ring, async, req->src,
|
||||
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
|
||||
ret = safexcel_send_req(async, ring, sreq, req->src,
|
||||
req->dst, req->cryptlen, 0, 0, req->iv,
|
||||
commands, results);
|
||||
|
||||
sreq->rdescs = *results;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
|
||||
req->cryptlen, req->assoclen,
|
||||
crypto_aead_authsize(tfm), req->iv,
|
||||
commands, results);
|
||||
sreq->rdescs = *results;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
 	u64 len[2];
 	u64 processed[2];
 
-	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
 
-	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
 };
 
 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
 {
-	if (req->len[1] > req->processed[1])
-		return 0xffffffff - (req->len[0] - req->processed[0]);
+	u64 len, processed;
 
-	return req->len[0] - req->processed[0];
+	len = (0xffffffff * req->len[1]) + req->len[0];
+	processed = (0xffffffff * req->processed[1]) + req->processed[0];
+
+	return len - processed;
 }
 
 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
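For readers following the safexcel_queued_len() rewrite above: the request keeps the accepted and processed byte counts as two-word counters, and the new version folds each pair into a single 64-bit value (using the same 0xffffffff base as the code above) and then subtracts, instead of special-casing the high words. A standalone sketch of the same arithmetic; the free-function form and names are illustrative, not driver code:

	#include <stdint.h>

	/* bytes accepted by the driver but not yet run through the engine */
	static uint64_t queued_len(const uint64_t len[2], const uint64_t processed[2])
	{
		uint64_t total = 0xffffffffULL * len[1] + len[0];
		uint64_t done  = 0xffffffffULL * processed[1] + processed[0];

		return total - done;
	}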
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
|
||||
cdesc->control_data.control0 |= ctx->alg;
|
||||
cdesc->control_data.control0 |= req->digest;
|
||||
|
||||
if (!req->finish)
|
||||
cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
|
||||
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
|
||||
if (req->processed[0] || req->processed[1]) {
|
||||
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
|
||||
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
|
||||
cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
|
||||
}
|
||||
|
||||
if (!req->finish)
|
||||
cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
|
||||
|
||||
/*
|
||||
* Copy the input digest if needed, and setup the context
|
||||
* fields. Do this now as we need it to setup the first command
|
||||
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
|
||||
dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
|
||||
DMA_TO_DEVICE);
|
||||
sreq->cache_dma = 0;
|
||||
sreq->cache_sz = 0;
|
||||
}
|
||||
|
||||
if (sreq->finish)
|
||||
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
|
||||
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
|
||||
struct safexcel_result_desc *rdesc;
|
||||
struct scatterlist *sg;
|
||||
int i, extra, n_cdesc = 0, ret = 0;
|
||||
u64 queued, len, cache_len;
|
||||
int i, extra = 0, n_cdesc = 0, ret = 0;
|
||||
u64 queued, len, cache_len, cache_max;
|
||||
|
||||
cache_max = crypto_ahash_blocksize(ahash);
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
|
||||
cache_max <<= 1;
|
||||
|
||||
queued = len = safexcel_queued_len(req);
|
||||
if (queued <= crypto_ahash_blocksize(ahash))
|
||||
if (queued <= cache_max)
|
||||
cache_len = queued;
|
||||
else
|
||||
cache_len = queued - areq->nbytes;
|
||||
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
|
||||
* fit into full blocks, cache it for the next send() call.
|
||||
*/
|
||||
extra = queued & (crypto_ahash_blocksize(ahash) - 1);
|
||||
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
|
||||
extra < crypto_ahash_blocksize(ahash))
|
||||
extra += crypto_ahash_blocksize(ahash);
|
||||
|
||||
/* If this is not the last request and the queued data
|
||||
* is a multiple of a block, cache the last one for now.
|
||||
*/
|
||||
if (!extra)
|
||||
/* If this is not the last request and the queued data
|
||||
* is a multiple of a block, cache the last one for now.
|
||||
*/
|
||||
extra = crypto_ahash_blocksize(ahash);
|
||||
|
||||
if (extra) {
|
||||
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
|
||||
req->cache_next, extra,
|
||||
areq->nbytes - extra);
|
||||
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
|
||||
req->cache_next, extra,
|
||||
areq->nbytes - extra);
|
||||
|
||||
queued -= extra;
|
||||
len -= extra;
|
||||
|
||||
if (!queued) {
|
||||
*commands = 0;
|
||||
*results = 0;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
queued -= extra;
|
||||
len -= extra;
|
||||
}
|
||||
|
||||
/* Add a command descriptor for the cached data, if any */
|
||||
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
|
||||
}
|
||||
|
||||
/* Now handle the current ahash request buffer(s) */
|
||||
req->nents = dma_map_sg(priv->dev, areq->src,
|
||||
sg_nents_for_len(areq->src, areq->nbytes),
|
||||
req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
|
||||
DMA_TO_DEVICE);
|
||||
if (!req->nents) {
|
||||
ret = -ENOMEM;
|
||||
@@ -345,6 +348,7 @@ unmap_cache:
|
||||
if (req->cache_dma) {
|
||||
dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
|
||||
DMA_TO_DEVICE);
|
||||
req->cache_dma = 0;
|
||||
req->cache_sz = 0;
|
||||
}
|
||||
|
||||
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
|
||||
struct safexcel_inv_result result = {};
|
||||
int ring = ctx->base.ring;
|
||||
|
||||
memset(req, 0, sizeof(struct ahash_request));
|
||||
memset(req, 0, EIP197_AHASH_REQ_SIZE);
|
||||
|
||||
/* create invalidation request */
|
||||
init_completion(&result.completion);
|
||||
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
|
||||
/* safexcel_ahash_cache: cache data until at least one request can be sent to
|
||||
* the engine, aka. when there is at least 1 block size in the pipe.
|
||||
*/
|
||||
static int safexcel_ahash_cache(struct ahash_request *areq)
|
||||
static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
|
||||
{
|
||||
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
|
||||
u64 queued, cache_len;
|
||||
|
||||
/* queued: everything accepted by the driver which will be handled by
|
||||
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
|
||||
* In case there isn't enough bytes to proceed (less than a
|
||||
* block size), cache the data until we have enough.
|
||||
*/
|
||||
if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
|
||||
if (cache_len + areq->nbytes <= cache_max) {
|
||||
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
|
||||
req->cache + cache_len,
|
||||
areq->nbytes, 0);
|
||||
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
|
||||
{
|
||||
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
|
||||
u32 cache_max;
|
||||
|
||||
/* If the request is 0 length, do nothing */
|
||||
if (!areq->nbytes)
|
||||
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
|
||||
if (req->len[0] < areq->nbytes)
|
||||
req->len[1]++;
|
||||
|
||||
safexcel_ahash_cache(areq);
|
||||
cache_max = crypto_ahash_blocksize(ahash);
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
|
||||
cache_max <<= 1;
|
||||
|
||||
safexcel_ahash_cache(areq, cache_max);
|
||||
|
||||
/*
|
||||
* We're not doing partial updates when performing an hmac request.
|
||||
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
|
||||
return safexcel_ahash_enqueue(areq);
|
||||
|
||||
if (!req->last_req &&
|
||||
safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
|
||||
safexcel_queued_len(req) > cache_max)
|
||||
return safexcel_ahash_enqueue(areq);
|
||||
|
||||
return 0;
|
||||
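The update path above now compares the queued length against a cache limit instead of a single block: one block size for plain hashes, two for HMAC, matching the extra block the HMAC path holds back in the send-path hunk earlier. A hedged sketch of that limit as a helper; the function is hypothetical, the driver computes it inline:

	static u32 example_cache_max(struct crypto_ahash *ahash, u32 digest)
	{
		u32 cache_max = crypto_ahash_blocksize(ahash);

		if (digest == CONTEXT_CONTROL_DIGEST_HMAC)
			cache_max <<= 1;	/* HMAC requests keep one extra block cached */

		return cache_max;
	}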
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
|
||||
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
|
||||
struct safexcel_ahash_export_state *export = out;
|
||||
u32 cache_sz;
|
||||
|
||||
cache_sz = crypto_ahash_blocksize(ahash);
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
|
||||
cache_sz <<= 1;
|
||||
|
||||
export->len[0] = req->len[0];
|
||||
export->len[1] = req->len[1];
|
||||
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
|
||||
export->digest = req->digest;
|
||||
|
||||
memcpy(export->state, req->state, req->state_sz);
|
||||
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
|
||||
memcpy(export->cache, req->cache, cache_sz);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
|
||||
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
|
||||
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
|
||||
const struct safexcel_ahash_export_state *export = in;
|
||||
u32 cache_sz;
|
||||
int ret;
|
||||
|
||||
ret = crypto_ahash_init(areq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cache_sz = crypto_ahash_blocksize(ahash);
|
||||
if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
|
||||
cache_sz <<= 1;
|
||||
|
||||
req->len[0] = export->len[0];
|
||||
req->len[1] = export->len[1];
|
||||
req->processed[0] = export->processed[0];
|
||||
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
|
||||
|
||||
req->digest = export->digest;
|
||||
|
||||
memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
|
||||
memcpy(req->cache, export->cache, cache_sz);
|
||||
memcpy(req->state, export->state, req->state_sz);
|
||||
|
||||
return 0;
|
||||
|
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
|
||||
(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
|
||||
cdesc->control_data.context_hi = upper_32_bits(context);
|
||||
|
||||
if (priv->version == EIP197B || priv->version == EIP197D)
|
||||
cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
|
||||
|
||||
/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
|
||||
cdesc->control_data.refresh = 2;
|
||||
|
||||
|
@@ -100,7 +100,7 @@ struct buffer_desc {
|
||||
u16 pkt_len;
|
||||
u16 buf_len;
|
||||
#endif
|
||||
u32 phys_addr;
|
||||
dma_addr_t phys_addr;
|
||||
u32 __reserved[4];
|
||||
struct buffer_desc *next;
|
||||
enum dma_data_direction dir;
|
||||
@@ -117,9 +117,9 @@ struct crypt_ctl {
|
||||
u8 mode; /* NPE_OP_* operation mode */
|
||||
#endif
|
||||
u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
|
||||
u32 icv_rev_aes; /* icv or rev aes */
|
||||
u32 src_buf;
|
||||
u32 dst_buf;
|
||||
dma_addr_t icv_rev_aes; /* icv or rev aes */
|
||||
dma_addr_t src_buf;
|
||||
dma_addr_t dst_buf;
|
||||
#ifdef __ARMEB__
|
||||
u16 auth_offs; /* Authentication start offset */
|
||||
u16 auth_len; /* Authentication data length */
|
||||
@@ -320,7 +320,8 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
|
||||
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
|
||||
dma_addr_t phys)
|
||||
{
|
||||
while (buf) {
|
||||
struct buffer_desc *buf1;
|
||||
@@ -602,7 +603,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
|
||||
struct buffer_desc *buf;
|
||||
int i;
|
||||
u8 *pad;
|
||||
u32 pad_phys, buf_phys;
|
||||
dma_addr_t pad_phys, buf_phys;
|
||||
|
||||
BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
|
||||
pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
|
||||
@@ -787,7 +788,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
|
||||
for (; nbytes > 0; sg = sg_next(sg)) {
|
||||
unsigned len = min(nbytes, sg->length);
|
||||
struct buffer_desc *next_buf;
|
||||
u32 next_buf_phys;
|
||||
dma_addr_t next_buf_phys;
|
||||
void *ptr;
|
||||
|
||||
nbytes -= len;
|
||||
|
@@ -986,8 +986,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct dcp *sdcp = NULL;
 	int i, ret;
-
-	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;
 
 	if (global_sdcp) {
@@ -995,7 +993,6 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
 	if (dcp_vmi_irq < 0) {
 		dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
@@ -1013,7 +1010,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	sdcp->dev = dev;
-	sdcp->base = devm_ioremap_resource(dev, iores);
+	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sdcp->base))
 		return PTR_ERR(sdcp->base);
 
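The probe conversion above folds the platform_get_resource() + devm_ioremap_resource() pair into a single call; devm_platform_ioremap_resource() is only a convenience wrapper, so the IS_ERR()/PTR_ERR() handling stays the same. Generic before/after shape, not the driver's exact code:

	static void __iomem *example_map_regs(struct platform_device *pdev)
	{
		/* old:
		 *   struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		 *   return devm_ioremap_resource(&pdev->dev, res);
		 */
		return devm_platform_ioremap_resource(pdev, 0);
	}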
|
@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
 #define WORKMEM_ALIGN (CRB_ALIGN)
 #define CSB_WAIT_MAX (5000) /* ms */
 #define VAS_RETRIES (10)
-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
-#define MAX_CREDITS_PER_RXFIFO (1024)
 
 struct nx842_workmem {
 	/* Below fields must be properly aligned */
@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
 	rxattr.lnotify_lpid = lpid;
 	rxattr.lnotify_pid = pid;
 	rxattr.lnotify_tid = tid;
-	rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+	/*
+	 * Maximum RX window credits can not be more than #CRBs in
+	 * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
+	 */
+	rxattr.wcreds_max = fifo_size / CRB_SIZE;
 
 	/*
 	 * Open a VAS receice window which is used to configure RxFIFO
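The second hunk above ties the receive-window credit count to what the RxFIFO can actually hold instead of a fixed 1024. Worked example with hypothetical numbers (the real fifo_size and CRB_SIZE come from the device setup, not from this diff): a 32 KB RxFIFO with 128-byte CRBs holds 32768 / 128 = 256 CRBs, so wcreds_max is capped at 256 and the FIFO can no longer be overrun into a checkstop.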
|
@@ -569,9 +569,7 @@ static int nx_register_algs(void)
 
 	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
 
-	rc = NX_DEBUGFS_INIT(&nx_driver);
-	if (rc)
-		goto out;
+	NX_DEBUGFS_INIT(&nx_driver);
 
 	nx_driver.of.status = NX_OKAY;
 
|
@@ -76,20 +76,12 @@ struct nx_stats {
 	atomic_t last_error_pid;
 };
 
-struct nx_debugfs {
-	struct dentry *dfs_root;
-	struct dentry *dfs_aes_ops, *dfs_aes_bytes;
-	struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
-	struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
-	struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
-};
-
 struct nx_crypto_driver {
 	struct nx_stats stats;
 	struct nx_of of;
 	struct vio_dev *viodev;
 	struct vio_driver viodriver;
-	struct nx_debugfs dfs;
+	struct dentry *dfs_root;
 };
 
 #define NX_GCM4106_NONCE_LEN (4)
@@ -177,7 +169,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
 #define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
 #define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
 
-int nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_init(struct nx_crypto_driver *);
 void nx_debugfs_fini(struct nx_crypto_driver *);
 #else
 #define NX_DEBUGFS_INIT(drv) (0)
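nx_debugfs_init() now returns void (see the prototype change above) because the debugfs_create_* helpers are not meant to be error-checked: callers create the directory and files, keep only the root dentry, and tear everything down recursively on exit. A hedged sketch of that pattern with a made-up stats struct:

	#include <linux/atomic.h>
	#include <linux/debugfs.h>

	struct example_stats { atomic_t ops; atomic64_t bytes; };

	static struct dentry *example_debugfs_init(struct example_stats *st)
	{
		struct dentry *root = debugfs_create_dir("example-nx", NULL);

		debugfs_create_u32("ops", 0444, root, (u32 *)&st->ops);
		debugfs_create_u64("bytes", 0444, root, (u64 *)&st->bytes);

		return root;	/* hand to debugfs_remove_recursive() at exit */
	}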
|
@@ -30,62 +30,37 @@
|
||||
* Documentation/ABI/testing/debugfs-pfo-nx-crypto
|
||||
*/
|
||||
|
||||
int nx_debugfs_init(struct nx_crypto_driver *drv)
|
||||
void nx_debugfs_init(struct nx_crypto_driver *drv)
|
||||
{
|
||||
struct nx_debugfs *dfs = &drv->dfs;
|
||||
struct dentry *root;
|
||||
|
||||
dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
|
||||
root = debugfs_create_dir(NX_NAME, NULL);
|
||||
drv->dfs_root = root;
|
||||
|
||||
dfs->dfs_aes_ops =
|
||||
debugfs_create_u32("aes_ops",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
|
||||
dfs->dfs_sha256_ops =
|
||||
debugfs_create_u32("sha256_ops",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u32 *)&drv->stats.sha256_ops);
|
||||
dfs->dfs_sha512_ops =
|
||||
debugfs_create_u32("sha512_ops",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u32 *)&drv->stats.sha512_ops);
|
||||
dfs->dfs_aes_bytes =
|
||||
debugfs_create_u64("aes_bytes",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u64 *)&drv->stats.aes_bytes);
|
||||
dfs->dfs_sha256_bytes =
|
||||
debugfs_create_u64("sha256_bytes",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u64 *)&drv->stats.sha256_bytes);
|
||||
dfs->dfs_sha512_bytes =
|
||||
debugfs_create_u64("sha512_bytes",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u64 *)&drv->stats.sha512_bytes);
|
||||
dfs->dfs_errors =
|
||||
debugfs_create_u32("errors",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root, (u32 *)&drv->stats.errors);
|
||||
dfs->dfs_last_error =
|
||||
debugfs_create_u32("last_error",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u32 *)&drv->stats.last_error);
|
||||
dfs->dfs_last_error_pid =
|
||||
debugfs_create_u32("last_error_pid",
|
||||
S_IRUSR | S_IRGRP | S_IROTH,
|
||||
dfs->dfs_root,
|
||||
(u32 *)&drv->stats.last_error_pid);
|
||||
return 0;
|
||||
debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.aes_ops);
|
||||
debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.sha256_ops);
|
||||
debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.sha512_ops);
|
||||
debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u64 *)&drv->stats.aes_bytes);
|
||||
debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u64 *)&drv->stats.sha256_bytes);
|
||||
debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u64 *)&drv->stats.sha512_bytes);
|
||||
debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.errors);
|
||||
debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.last_error);
|
||||
debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
|
||||
root, (u32 *)&drv->stats.last_error_pid);
|
||||
}
|
||||
|
||||
void
|
||||
nx_debugfs_fini(struct nx_crypto_driver *drv)
|
||||
{
|
||||
debugfs_remove_recursive(drv->dfs.dfs_root);
|
||||
debugfs_remove_recursive(drv->dfs_root);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -131,7 +131,6 @@ struct qat_alg_ablkcipher_ctx {
|
||||
struct icp_qat_fw_la_bulk_req dec_fw_req;
|
||||
struct qat_crypto_instance *inst;
|
||||
struct crypto_tfm *tfm;
|
||||
spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
|
||||
};
|
||||
|
||||
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
|
||||
@@ -223,6 +222,9 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
|
||||
return -EFAULT;
|
||||
|
||||
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
|
||||
if (offset < 0)
|
||||
return -EFAULT;
|
||||
|
||||
hash_state_out = (__be32 *)(hash->sha.state1 + offset);
|
||||
hash512_state_out = (__be64 *)hash_state_out;
|
||||
|
||||
@@ -253,7 +255,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
|
||||
static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
|
||||
{
|
||||
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
|
||||
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_UPDATE_STATE);
|
||||
}
|
||||
|
||||
static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
|
||||
{
|
||||
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
|
||||
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_NO_UPDATE_STATE);
|
||||
}
|
||||
|
||||
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
|
||||
int aead)
|
||||
{
|
||||
header->hdr_flags =
|
||||
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
|
||||
@@ -263,12 +282,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
|
||||
QAT_COMN_PTR_TYPE_SGL);
|
||||
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_PARTIAL_NONE);
|
||||
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
|
||||
if (aead)
|
||||
qat_alg_init_hdr_no_iv_updt(header);
|
||||
else
|
||||
qat_alg_init_hdr_iv_updt(header);
|
||||
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_NO_PROTO);
|
||||
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_NO_UPDATE_STATE);
|
||||
}
|
||||
|
||||
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
|
||||
@@ -303,7 +322,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
|
||||
return -EFAULT;
|
||||
|
||||
/* Request setup */
|
||||
qat_alg_init_common_hdr(header);
|
||||
qat_alg_init_common_hdr(header, 1);
|
||||
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
|
||||
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
|
||||
@@ -390,7 +409,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
|
||||
return -EFAULT;
|
||||
|
||||
/* Request setup */
|
||||
qat_alg_init_common_hdr(header);
|
||||
qat_alg_init_common_hdr(header, 1);
|
||||
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
|
||||
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
|
||||
ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
|
||||
@@ -454,7 +473,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
|
||||
struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
|
||||
|
||||
memcpy(cd->aes.key, key, keylen);
|
||||
qat_alg_init_common_hdr(header);
|
||||
qat_alg_init_common_hdr(header, 0);
|
||||
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
|
||||
cd_pars->u.s.content_desc_params_sz =
|
||||
sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
|
||||
@@ -576,45 +595,52 @@ bad_key:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
|
||||
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
|
||||
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
|
||||
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
|
||||
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
|
||||
|
||||
return qat_alg_aead_init_sessions(tfm, key, keylen,
|
||||
ICP_QAT_HW_CIPHER_CBC_MODE);
|
||||
}
|
||||
|
||||
static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct qat_crypto_instance *inst = NULL;
|
||||
int node = get_current_node();
|
||||
struct device *dev;
|
||||
int ret;
|
||||
|
||||
if (ctx->enc_cd) {
|
||||
/* rekeying */
|
||||
dev = &GET_DEV(ctx->inst->accel_dev);
|
||||
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
|
||||
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
|
||||
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
|
||||
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
|
||||
} else {
|
||||
/* new key */
|
||||
int node = get_current_node();
|
||||
struct qat_crypto_instance *inst =
|
||||
qat_crypto_get_instance_node(node);
|
||||
if (!inst) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev = &GET_DEV(inst->accel_dev);
|
||||
ctx->inst = inst;
|
||||
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
|
||||
&ctx->enc_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->enc_cd) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
|
||||
&ctx->dec_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->dec_cd) {
|
||||
goto out_free_enc;
|
||||
}
|
||||
inst = qat_crypto_get_instance_node(node);
|
||||
if (!inst)
|
||||
return -EINVAL;
|
||||
dev = &GET_DEV(inst->accel_dev);
|
||||
ctx->inst = inst;
|
||||
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
|
||||
&ctx->enc_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->enc_cd) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_inst;
|
||||
}
|
||||
if (qat_alg_aead_init_sessions(tfm, key, keylen,
|
||||
ICP_QAT_HW_CIPHER_CBC_MODE))
|
||||
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
|
||||
&ctx->dec_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->dec_cd) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_enc;
|
||||
}
|
||||
|
||||
ret = qat_alg_aead_init_sessions(tfm, key, keylen,
|
||||
ICP_QAT_HW_CIPHER_CBC_MODE);
|
||||
if (ret)
|
||||
goto out_free_all;
|
||||
|
||||
return 0;
|
||||
@@ -629,7 +655,21 @@ out_free_enc:
|
||||
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
|
||||
ctx->enc_cd, ctx->enc_cd_paddr);
|
||||
ctx->enc_cd = NULL;
|
||||
return -ENOMEM;
|
||||
out_free_inst:
|
||||
ctx->inst = NULL;
|
||||
qat_crypto_put_instance(inst);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
if (ctx->enc_cd)
|
||||
return qat_alg_aead_rekey(tfm, key, keylen);
|
||||
else
|
||||
return qat_alg_aead_newkey(tfm, key, keylen);
|
||||
}
|
||||
|
||||
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
|
||||
@@ -677,8 +717,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
dma_addr_t blp;
|
||||
dma_addr_t bloutp = 0;
|
||||
struct scatterlist *sg;
|
||||
size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
|
||||
((1 + n) * sizeof(struct qat_alg_buf));
|
||||
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
|
||||
|
||||
if (unlikely(!n))
|
||||
return -EINVAL;
|
||||
@@ -715,8 +754,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
|
||||
struct qat_alg_buf *bufers;
|
||||
|
||||
n = sg_nents(sglout);
|
||||
sz_out = sizeof(struct qat_alg_buf_list) +
|
||||
((1 + n) * sizeof(struct qat_alg_buf));
|
||||
sz_out = struct_size(buflout, bufers, n + 1);
|
||||
sg_nctr = 0;
|
||||
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
|
||||
dev_to_node(&GET_DEV(inst->accel_dev)));
|
||||
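The two buffer-list hunks above replace the open-coded sizeof(struct qat_alg_buf_list) + (1 + n) * sizeof(struct qat_alg_buf) with struct_size(), which computes the size of a structure that ends in a flexible array member plus n + 1 trailing elements, and saturates instead of wrapping if the multiplication would overflow. A minimal sketch with stand-in types (not the driver's real structures):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct example_buf { u64 addr; u32 len; };

	struct example_buf_list {
		u64 resrvd;
		struct example_buf bufers[];	/* flexible array member */
	};

	static struct example_buf_list *example_alloc(int n, gfp_t gfp)
	{
		struct example_buf_list *bl;

		/* struct_size(bl, bufers, n + 1) ==
		 *   sizeof(*bl) + (n + 1) * sizeof(bl->bufers[0]), overflow-checked */
		bl = kzalloc(struct_size(bl, bufers, n + 1), gfp);
		return bl;
	}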
@@ -801,11 +839,17 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
|
||||
struct qat_crypto_instance *inst = ctx->inst;
|
||||
struct ablkcipher_request *areq = qat_req->ablkcipher_req;
|
||||
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
|
||||
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
|
||||
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
|
||||
|
||||
qat_alg_free_bufl(inst, qat_req);
|
||||
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
|
||||
res = -EINVAL;
|
||||
|
||||
memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
|
||||
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
|
||||
qat_req->iv_paddr);
|
||||
|
||||
areq->base.complete(&areq->base, res);
|
||||
}
|
||||
|
||||
@@ -905,50 +949,49 @@ static int qat_alg_aead_enc(struct aead_request *areq)
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
|
||||
static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
|
||||
const u8 *key, unsigned int keylen,
|
||||
int mode)
|
||||
{
|
||||
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
|
||||
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
|
||||
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
|
||||
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
|
||||
|
||||
return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
|
||||
const u8 *key, unsigned int keylen,
|
||||
int mode)
|
||||
{
|
||||
struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
struct qat_crypto_instance *inst = NULL;
|
||||
struct device *dev;
|
||||
int node = get_current_node();
|
||||
int ret;
|
||||
|
||||
spin_lock(&ctx->lock);
|
||||
if (ctx->enc_cd) {
|
||||
/* rekeying */
|
||||
dev = &GET_DEV(ctx->inst->accel_dev);
|
||||
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
|
||||
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
|
||||
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
|
||||
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
|
||||
} else {
|
||||
/* new key */
|
||||
int node = get_current_node();
|
||||
struct qat_crypto_instance *inst =
|
||||
qat_crypto_get_instance_node(node);
|
||||
if (!inst) {
|
||||
spin_unlock(&ctx->lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev = &GET_DEV(inst->accel_dev);
|
||||
ctx->inst = inst;
|
||||
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
|
||||
&ctx->enc_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->enc_cd) {
|
||||
spin_unlock(&ctx->lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
|
||||
&ctx->dec_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->dec_cd) {
|
||||
spin_unlock(&ctx->lock);
|
||||
goto out_free_enc;
|
||||
}
|
||||
inst = qat_crypto_get_instance_node(node);
|
||||
if (!inst)
|
||||
return -EINVAL;
|
||||
dev = &GET_DEV(inst->accel_dev);
|
||||
ctx->inst = inst;
|
||||
ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
|
||||
&ctx->enc_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->enc_cd) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_instance;
|
||||
}
|
||||
spin_unlock(&ctx->lock);
|
||||
if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
|
||||
ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
|
||||
&ctx->dec_cd_paddr,
|
||||
GFP_ATOMIC);
|
||||
if (!ctx->dec_cd) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_enc;
|
||||
}
|
||||
|
||||
ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
|
||||
if (ret)
|
||||
goto out_free_all;
|
||||
|
||||
return 0;
|
||||
@@ -963,7 +1006,22 @@ out_free_enc:
|
||||
dma_free_coherent(dev, sizeof(*ctx->enc_cd),
|
||||
ctx->enc_cd, ctx->enc_cd_paddr);
|
||||
ctx->enc_cd = NULL;
|
||||
return -ENOMEM;
|
||||
out_free_instance:
|
||||
ctx->inst = NULL;
|
||||
qat_crypto_put_instance(inst);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
|
||||
const u8 *key, unsigned int keylen,
|
||||
int mode)
|
||||
{
|
||||
struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
|
||||
if (ctx->enc_cd)
|
||||
return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
|
||||
else
|
||||
return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
|
||||
@@ -995,11 +1053,23 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
|
||||
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
|
||||
struct icp_qat_fw_la_cipher_req_params *cipher_param;
|
||||
struct icp_qat_fw_la_bulk_req *msg;
|
||||
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
|
||||
int ret, ctr = 0;
|
||||
|
||||
if (req->nbytes == 0)
|
||||
return 0;
|
||||
|
||||
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
|
||||
&qat_req->iv_paddr, GFP_ATOMIC);
|
||||
if (!qat_req->iv)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
|
||||
qat_req->iv_paddr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
msg = &qat_req->req;
|
||||
*msg = ctx->enc_fw_req;
|
||||
@@ -1012,18 +1082,29 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
|
||||
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
|
||||
cipher_param->cipher_length = req->nbytes;
|
||||
cipher_param->cipher_offset = 0;
|
||||
memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
|
||||
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
|
||||
memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
|
||||
do {
|
||||
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
|
||||
} while (ret == -EAGAIN && ctr++ < 10);
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
qat_alg_free_bufl(ctx->inst, qat_req);
|
||||
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
|
||||
qat_req->iv_paddr);
|
||||
return -EBUSY;
|
||||
}
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
if (req->nbytes % AES_BLOCK_SIZE != 0)
|
||||
return -EINVAL;
|
||||
|
||||
return qat_alg_ablkcipher_encrypt(req);
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
|
||||
@@ -1032,11 +1113,23 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
|
||||
struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
|
||||
struct icp_qat_fw_la_cipher_req_params *cipher_param;
|
||||
struct icp_qat_fw_la_bulk_req *msg;
|
||||
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
|
||||
int ret, ctr = 0;
|
||||
|
||||
if (req->nbytes == 0)
|
||||
return 0;
|
||||
|
||||
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
|
||||
&qat_req->iv_paddr, GFP_ATOMIC);
|
||||
if (!qat_req->iv)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
|
||||
qat_req->iv_paddr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
msg = &qat_req->req;
|
||||
*msg = ctx->dec_fw_req;
|
||||
@@ -1049,18 +1142,28 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
|
||||
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
|
||||
cipher_param->cipher_length = req->nbytes;
|
||||
cipher_param->cipher_offset = 0;
|
||||
memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
|
||||
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
|
||||
memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
|
||||
do {
|
||||
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
|
||||
} while (ret == -EAGAIN && ctr++ < 10);
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
qat_alg_free_bufl(ctx->inst, qat_req);
|
||||
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
|
||||
qat_req->iv_paddr);
|
||||
return -EBUSY;
|
||||
}
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
if (req->nbytes % AES_BLOCK_SIZE != 0)
|
||||
return -EINVAL;
|
||||
|
||||
return qat_alg_ablkcipher_decrypt(req);
|
||||
}
|
||||
static int qat_alg_aead_init(struct crypto_aead *tfm,
|
||||
enum icp_qat_hw_auth_algo hash,
|
||||
const char *hash_name)
|
||||
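Both ablkcipher paths above now hand the IV to the firmware by physical address (ICP_QAT_FW_CIPH_IV_64BIT_PTR) rather than embedding 16 bytes of IV data in the request: each request allocates a small DMA-coherent buffer, copies req->info into it before submission, and the completion callback copies the updated IV back and frees the buffer. A simplified sketch of that life cycle; the helpers are hypothetical and error paths are trimmed, but the fields match the qat_crypto_request additions shown later in this patch:

	static int example_iv_map(struct device *dev, struct ablkcipher_request *req,
				  struct qat_crypto_request *qat_req)
	{
		qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
						 &qat_req->iv_paddr, GFP_ATOMIC);
		if (!qat_req->iv)
			return -ENOMEM;

		memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);	/* IV in */
		return 0;
	}

	static void example_iv_unmap(struct device *dev, struct ablkcipher_request *req,
				     struct qat_crypto_request *qat_req)
	{
		memcpy(req->info, qat_req->iv, AES_BLOCK_SIZE);	/* chained IV out */
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, qat_req->iv_paddr);
	}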
@@ -1119,7 +1222,6 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
|
||||
ctx->tfm = tfm;
|
||||
return 0;
|
||||
@@ -1221,8 +1323,8 @@ static struct crypto_alg qat_algs[] = { {
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.setkey = qat_alg_ablkcipher_cbc_setkey,
|
||||
.decrypt = qat_alg_ablkcipher_decrypt,
|
||||
.encrypt = qat_alg_ablkcipher_encrypt,
|
||||
.decrypt = qat_alg_ablkcipher_blk_decrypt,
|
||||
.encrypt = qat_alg_ablkcipher_blk_encrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
@@ -1233,7 +1335,7 @@ static struct crypto_alg qat_algs[] = { {
|
||||
.cra_driver_name = "qat_aes_ctr",
|
||||
.cra_priority = 4001,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
@@ -1265,8 +1367,8 @@ static struct crypto_alg qat_algs[] = { {
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.setkey = qat_alg_ablkcipher_xts_setkey,
|
||||
.decrypt = qat_alg_ablkcipher_decrypt,
|
||||
.encrypt = qat_alg_ablkcipher_encrypt,
|
||||
.decrypt = qat_alg_ablkcipher_blk_decrypt,
|
||||
.encrypt = qat_alg_ablkcipher_blk_encrypt,
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
|
@@ -88,6 +88,8 @@ struct qat_crypto_request {
struct qat_crypto_request_buffs buf;
void (*cb)(struct icp_qat_fw_la_resp *resp,
struct qat_crypto_request *req);
void *iv;
dma_addr_t iv_paddr;
};

#endif
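
The new iv and iv_paddr members back the per-request coherent IV buffer that the qat encrypt/decrypt paths above allocate and release on every exit path. A minimal sketch of that alloc/free pairing, with the qat firmware plumbing left out (dev and iv_paddr here are illustrative locals, not driver symbols):

        void *iv;
        dma_addr_t iv_paddr;

        iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, &iv_paddr, GFP_ATOMIC);
        if (!iv)
                return -ENOMEM;
        /* ... copy req->info into iv and hand iv_paddr to the firmware request ... */
        dma_free_coherent(dev, AES_BLOCK_SIZE, iv, iv_paddr);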
|
||||
|
@@ -1384,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, sahara_dt_ids);
|
||||
static int sahara_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct sahara_dev *dev;
|
||||
struct resource *res;
|
||||
u32 version;
|
||||
int irq;
|
||||
int err;
|
||||
@@ -1398,8 +1397,7 @@ static int sahara_probe(struct platform_device *pdev)
|
||||
platform_set_drvdata(pdev, dev);
|
||||
|
||||
/* Get the base address */
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(dev->regs_base))
|
||||
return PTR_ERR(dev->regs_base);
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
|
||||
|
@@ -349,7 +349,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
|
||||
return -ETIMEDOUT;
|
||||
|
||||
if ((hdev->flags & HASH_FLAGS_HMAC) &&
|
||||
(hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
|
||||
(!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
|
||||
hdev->flags |= HASH_FLAGS_HMAC_KEY;
|
||||
stm32_hash_write_key(hdev);
|
||||
if (stm32_hash_wait_busy(hdev))
|
||||
@@ -447,8 +447,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
|
||||
|
||||
dma_async_issue_pending(hdev->dma_lch);
|
||||
|
||||
if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
|
||||
msecs_to_jiffies(100)))
|
||||
if (!wait_for_completion_timeout(&hdev->dma_completion,
|
||||
msecs_to_jiffies(100)))
|
||||
err = -ETIMEDOUT;
|
||||
|
||||
if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
|
||||
|
@@ -12,7 +12,7 @@
|
||||
*/
|
||||
#include "sun4i-ss.h"
|
||||
|
||||
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
|
||||
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
|
||||
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
|
||||
@@ -114,6 +114,29 @@ release_ss:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
int err;

skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
skcipher_request_set_callback(subreq, areq->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (ctx->mode & SS_DECRYPTION)
err = crypto_skcipher_decrypt(subreq);
else
err = crypto_skcipher_encrypt(subreq);
skcipher_request_zero(subreq);

return err;
}
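
Splitting the fallback into a noinline_for_stack helper keeps the on-stack sync skcipher request out of sun4i_ss_cipher_poll()'s frame. The same pattern in isolation, assuming the fallback transform was obtained earlier with crypto_alloc_sync_skcipher(..., CRYPTO_ALG_NEED_FALLBACK); example_fallback, fb and dec are illustrative names, not driver symbols:

static int example_fallback(struct crypto_sync_skcipher *fb,
                            struct skcipher_request *areq, bool dec)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb);
        int err;

        skcipher_request_set_sync_tfm(subreq, fb);
        skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        err = dec ? crypto_skcipher_decrypt(subreq) :
                    crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);
        return err;
}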
|
||||
|
||||
/* Generic function that supports SGs whose size is not a multiple of 4 */
|
||||
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
{
|
||||
@@ -140,8 +163,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
unsigned int todo;
|
||||
struct sg_mapping_iter mi, mo;
|
||||
unsigned int oi, oo; /* offset for in and out */
|
||||
char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
|
||||
char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
|
||||
unsigned int ob = 0; /* offset in buf */
|
||||
unsigned int obo = 0; /* offset in bufo*/
|
||||
unsigned int obl = 0; /* length of data in bufo */
|
||||
@@ -178,20 +199,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
if (no_chunk == 1 && !need_fallback)
|
||||
return sun4i_ss_opti_poll(areq);
|
||||
|
||||
if (need_fallback) {
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
|
||||
skcipher_request_set_callback(subreq, areq->base.flags, NULL,
|
||||
NULL);
|
||||
skcipher_request_set_crypt(subreq, areq->src, areq->dst,
|
||||
areq->cryptlen, areq->iv);
|
||||
if (ctx->mode & SS_DECRYPTION)
|
||||
err = crypto_skcipher_decrypt(subreq);
|
||||
else
|
||||
err = crypto_skcipher_encrypt(subreq);
|
||||
skcipher_request_zero(subreq);
|
||||
return err;
|
||||
}
|
||||
if (need_fallback)
|
||||
return sun4i_ss_cipher_poll_fallback(areq);
|
||||
|
||||
spin_lock_irqsave(&ss->slock, flags);
|
||||
|
||||
@@ -224,6 +233,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
|
||||
while (oleft) {
|
||||
if (ileft) {
|
||||
char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
|
||||
|
||||
/*
|
||||
* todo is the number of consecutive 4-byte words that we
* can read from the current SG
|
||||
@@ -281,6 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
||||
oo = 0;
|
||||
}
|
||||
} else {
|
||||
char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
|
||||
|
||||
/*
|
||||
* read obl bytes into bufo; read as much as possible to
* empty the device
|
||||
|
@@ -265,11 +265,11 @@ static int init_device(struct device *dev)
|
||||
* callback must check err and feedback in descriptor header
|
||||
* for device processing status.
|
||||
*/
|
||||
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
|
||||
void (*callback)(struct device *dev,
|
||||
struct talitos_desc *desc,
|
||||
void *context, int error),
|
||||
void *context)
|
||||
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
|
||||
void (*callback)(struct device *dev,
|
||||
struct talitos_desc *desc,
|
||||
void *context, int error),
|
||||
void *context)
|
||||
{
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
struct talitos_request *request;
|
||||
@@ -319,7 +319,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
|
||||
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
EXPORT_SYMBOL(talitos_submit);
|
||||
|
||||
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
|
||||
{
|
||||
struct talitos_edesc *edesc;
|
||||
|
||||
if (!is_sec1)
|
||||
return request->desc->hdr;
|
||||
|
||||
if (!request->desc->next_desc)
|
||||
return request->desc->hdr1;
|
||||
|
||||
edesc = container_of(request->desc, struct talitos_edesc, desc);
|
||||
|
||||
return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
|
||||
}
|
||||
|
||||
/*
|
||||
* process what was done, notify callback of error if not
|
||||
@@ -342,12 +356,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
|
||||
|
||||
/* descriptors with their done bits set don't get the error */
|
||||
rmb();
|
||||
if (!is_sec1)
|
||||
hdr = request->desc->hdr;
|
||||
else if (request->desc->next_desc)
|
||||
hdr = (request->desc + 1)->hdr1;
|
||||
else
|
||||
hdr = request->desc->hdr1;
|
||||
hdr = get_request_hdr(request, is_sec1);
|
||||
|
||||
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
|
||||
status = 0;
|
||||
@@ -477,8 +486,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
|
||||
}
|
||||
}
|
||||
|
||||
if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
|
||||
return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
|
||||
if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
|
||||
struct talitos_edesc *edesc;
|
||||
|
||||
edesc = container_of(priv->chan[ch].fifo[iter].desc,
|
||||
struct talitos_edesc, desc);
|
||||
return ((struct talitos_desc *)
|
||||
(edesc->buf + edesc->dma_len))->hdr;
|
||||
}
|
||||
|
||||
return priv->chan[ch].fifo[iter].desc->hdr;
|
||||
}
|
||||
@@ -824,7 +839,11 @@ static void talitos_unregister_rng(struct device *dev)
|
||||
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
|
||||
*/
|
||||
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
|
||||
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
|
||||
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
|
||||
#else
|
||||
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
|
||||
#endif
|
||||
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
|
||||
|
||||
struct talitos_ctx {
|
||||
@@ -948,36 +967,6 @@ badkey:
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* talitos_edesc - s/w-extended descriptor
|
||||
* @src_nents: number of segments in input scatterlist
|
||||
* @dst_nents: number of segments in output scatterlist
|
||||
* @icv_ool: whether ICV is out-of-line
|
||||
* @iv_dma: dma address of iv for checking continuity and link table
|
||||
* @dma_len: length of dma mapped link_tbl space
|
||||
* @dma_link_tbl: bus physical address of link_tbl/buf
|
||||
* @desc: h/w descriptor
|
||||
* @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
|
||||
* @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
|
||||
*
|
||||
* if decrypting (with authcheck), or either one of src_nents or dst_nents
|
||||
* is greater than 1, an integrity check value is concatenated to the end
|
||||
* of link_tbl data
|
||||
*/
|
||||
struct talitos_edesc {
|
||||
int src_nents;
|
||||
int dst_nents;
|
||||
bool icv_ool;
|
||||
dma_addr_t iv_dma;
|
||||
int dma_len;
|
||||
dma_addr_t dma_link_tbl;
|
||||
struct talitos_desc desc;
|
||||
union {
|
||||
struct talitos_ptr link_tbl[0];
|
||||
u8 buf[0];
|
||||
};
|
||||
};
|
||||
|
||||
static void talitos_sg_unmap(struct device *dev,
|
||||
struct talitos_edesc *edesc,
|
||||
struct scatterlist *src,
|
||||
@@ -1008,11 +997,13 @@ static void talitos_sg_unmap(struct device *dev,
|
||||
|
||||
static void ipsec_esp_unmap(struct device *dev,
|
||||
struct talitos_edesc *edesc,
|
||||
struct aead_request *areq)
|
||||
struct aead_request *areq, bool encrypt)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
|
||||
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
|
||||
bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
|
||||
struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
|
||||
|
||||
@@ -1021,8 +1012,8 @@ static void ipsec_esp_unmap(struct device *dev,
|
||||
DMA_FROM_DEVICE);
|
||||
unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
|
||||
|
||||
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
|
||||
areq->assoclen);
|
||||
talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
|
||||
cryptlen + authsize, areq->assoclen);
|
||||
|
||||
if (edesc->dma_len)
|
||||
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
|
||||
@@ -1032,7 +1023,7 @@ static void ipsec_esp_unmap(struct device *dev,
|
||||
unsigned int dst_nents = edesc->dst_nents ? : 1;
|
||||
|
||||
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
|
||||
areq->assoclen + areq->cryptlen - ivsize);
|
||||
areq->assoclen + cryptlen - ivsize);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1043,31 +1034,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
|
||||
struct talitos_desc *desc, void *context,
|
||||
int err)
|
||||
{
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
bool is_sec1 = has_ftr_sec1(priv);
|
||||
struct aead_request *areq = context;
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
|
||||
unsigned int authsize = crypto_aead_authsize(authenc);
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc);
|
||||
struct talitos_edesc *edesc;
|
||||
struct scatterlist *sg;
|
||||
void *icvdata;
|
||||
|
||||
edesc = container_of(desc, struct talitos_edesc, desc);
|
||||
|
||||
ipsec_esp_unmap(dev, edesc, areq);
|
||||
|
||||
/* copy the generated ICV to dst */
|
||||
if (edesc->icv_ool) {
|
||||
if (is_sec1)
|
||||
icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
|
||||
else
|
||||
icvdata = &edesc->link_tbl[edesc->src_nents +
|
||||
edesc->dst_nents + 2];
|
||||
sg = sg_last(areq->dst, edesc->dst_nents);
|
||||
memcpy((char *)sg_virt(sg) + sg->length - authsize,
|
||||
icvdata, authsize);
|
||||
}
|
||||
ipsec_esp_unmap(dev, edesc, areq, true);
|
||||
|
||||
dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
|
||||
|
||||
@@ -1084,32 +1058,16 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
unsigned int authsize = crypto_aead_authsize(authenc);
|
||||
struct talitos_edesc *edesc;
|
||||
struct scatterlist *sg;
|
||||
char *oicv, *icv;
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
bool is_sec1 = has_ftr_sec1(priv);
|
||||
|
||||
edesc = container_of(desc, struct talitos_edesc, desc);
|
||||
|
||||
ipsec_esp_unmap(dev, edesc, req);
|
||||
ipsec_esp_unmap(dev, edesc, req, false);
|
||||
|
||||
if (!err) {
|
||||
/* auth check */
|
||||
sg = sg_last(req->dst, edesc->dst_nents ? : 1);
|
||||
icv = (char *)sg_virt(sg) + sg->length - authsize;
|
||||
|
||||
if (edesc->dma_len) {
|
||||
if (is_sec1)
|
||||
oicv = (char *)&edesc->dma_link_tbl +
|
||||
req->assoclen + req->cryptlen;
|
||||
else
|
||||
oicv = (char *)
|
||||
&edesc->link_tbl[edesc->src_nents +
|
||||
edesc->dst_nents + 2];
|
||||
if (edesc->icv_ool)
|
||||
icv = oicv + authsize;
|
||||
} else
|
||||
oicv = (char *)&edesc->link_tbl[0];
|
||||
oicv = edesc->buf + edesc->dma_len;
|
||||
icv = oicv - authsize;
|
||||
|
||||
err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
|
||||
}
|
||||
@@ -1128,7 +1086,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
|
||||
|
||||
edesc = container_of(desc, struct talitos_edesc, desc);
|
||||
|
||||
ipsec_esp_unmap(dev, edesc, req);
|
||||
ipsec_esp_unmap(dev, edesc, req, false);
|
||||
|
||||
/* check ICV auth status */
|
||||
if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
|
||||
@@ -1145,11 +1103,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
|
||||
* stop at cryptlen bytes
|
||||
*/
|
||||
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
|
||||
unsigned int offset, int cryptlen,
|
||||
unsigned int offset, int datalen, int elen,
|
||||
struct talitos_ptr *link_tbl_ptr)
|
||||
{
|
||||
int n_sg = sg_count;
|
||||
int n_sg = elen ? sg_count + 1 : sg_count;
|
||||
int count = 0;
|
||||
int cryptlen = datalen + elen;
|
||||
|
||||
while (cryptlen && sg && n_sg--) {
|
||||
unsigned int len = sg_dma_len(sg);
|
||||
@@ -1164,11 +1123,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
|
||||
if (len > cryptlen)
|
||||
len = cryptlen;
|
||||
|
||||
if (datalen > 0 && len > datalen) {
|
||||
to_talitos_ptr(link_tbl_ptr + count,
|
||||
sg_dma_address(sg) + offset, datalen, 0);
|
||||
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
|
||||
count++;
|
||||
len -= datalen;
|
||||
offset += datalen;
|
||||
}
|
||||
to_talitos_ptr(link_tbl_ptr + count,
|
||||
sg_dma_address(sg) + offset, len, 0);
|
||||
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
|
||||
count++;
|
||||
cryptlen -= len;
|
||||
datalen -= len;
|
||||
offset = 0;
|
||||
|
||||
next:
|
||||
@@ -1178,7 +1146,7 @@ next:
|
||||
/* tag end of link table */
|
||||
if (count > 0)
|
||||
to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
|
||||
DESC_PTR_LNKTBL_RETURN, 0);
|
||||
DESC_PTR_LNKTBL_RET, 0);
|
||||
|
||||
return count;
|
||||
}
|
||||
@@ -1186,7 +1154,8 @@ next:
|
||||
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
|
||||
unsigned int len, struct talitos_edesc *edesc,
|
||||
struct talitos_ptr *ptr, int sg_count,
|
||||
unsigned int offset, int tbl_off, int elen)
|
||||
unsigned int offset, int tbl_off, int elen,
|
||||
bool force)
|
||||
{
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
bool is_sec1 = has_ftr_sec1(priv);
|
||||
@@ -1196,7 +1165,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
|
||||
return 1;
|
||||
}
|
||||
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
|
||||
if (sg_count == 1) {
|
||||
if (sg_count == 1 && !force) {
|
||||
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
|
||||
return sg_count;
|
||||
}
|
||||
@@ -1204,9 +1173,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
|
||||
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
|
||||
return sg_count;
|
||||
}
|
||||
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
|
||||
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
|
||||
&edesc->link_tbl[tbl_off]);
|
||||
if (sg_count == 1) {
|
||||
if (sg_count == 1 && !force) {
|
||||
/* Only one segment now, so no link tbl needed */
|
||||
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
|
||||
return sg_count;
|
||||
@@ -1224,13 +1193,14 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
|
||||
unsigned int offset, int tbl_off)
|
||||
{
|
||||
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
|
||||
tbl_off, 0);
|
||||
tbl_off, 0, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* fill in and submit ipsec_esp descriptor
|
||||
*/
|
||||
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
bool encrypt,
|
||||
void (*callback)(struct device *dev,
|
||||
struct talitos_desc *desc,
|
||||
void *context, int error))
|
||||
@@ -1240,7 +1210,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *dev = ctx->dev;
|
||||
struct talitos_desc *desc = &edesc->desc;
|
||||
unsigned int cryptlen = areq->cryptlen;
|
||||
unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
int tbl_off = 0;
|
||||
int sg_count, ret;
|
||||
@@ -1251,6 +1221,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
|
||||
struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
|
||||
struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
|
||||
dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
|
||||
|
||||
/* hmac key */
|
||||
to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
|
||||
@@ -1290,7 +1261,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
elen = authsize;
|
||||
|
||||
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
|
||||
sg_count, areq->assoclen, tbl_off, elen);
|
||||
sg_count, areq->assoclen, tbl_off, elen,
|
||||
false);
|
||||
|
||||
if (ret > 1) {
|
||||
tbl_off += ret;
|
||||
@@ -1304,55 +1276,32 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
|
||||
sg_count, areq->assoclen, tbl_off);
|
||||
if (is_ipsec_esp && encrypt)
|
||||
elen = authsize;
|
||||
else
|
||||
elen = 0;
|
||||
ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
|
||||
sg_count, areq->assoclen, tbl_off, elen,
|
||||
is_ipsec_esp && !encrypt);
|
||||
tbl_off += ret;
|
||||
|
||||
if (is_ipsec_esp)
|
||||
if (!encrypt && is_ipsec_esp) {
|
||||
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
|
||||
|
||||
/* Add an entry to the link table for ICV data */
|
||||
to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
|
||||
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
|
||||
|
||||
/* icv data follows link tables */
|
||||
to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
|
||||
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
|
||||
|
||||
/* ICV data */
|
||||
if (ret > 1) {
|
||||
tbl_off += ret;
|
||||
edesc->icv_ool = true;
|
||||
sync_needed = true;
|
||||
|
||||
if (is_ipsec_esp) {
|
||||
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
|
||||
int offset = (edesc->src_nents + edesc->dst_nents + 2) *
|
||||
sizeof(struct talitos_ptr) + authsize;
|
||||
|
||||
/* Add an entry to the link table for ICV data */
|
||||
to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
|
||||
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
|
||||
is_sec1);
|
||||
|
||||
/* icv data follows link tables */
|
||||
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
|
||||
authsize, is_sec1);
|
||||
} else {
|
||||
dma_addr_t addr = edesc->dma_link_tbl;
|
||||
|
||||
if (is_sec1)
|
||||
addr += areq->assoclen + cryptlen;
|
||||
else
|
||||
addr += sizeof(struct talitos_ptr) * tbl_off;
|
||||
|
||||
to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
|
||||
}
|
||||
} else if (!encrypt) {
|
||||
to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
|
||||
sync_needed = true;
|
||||
} else if (!is_ipsec_esp) {
|
||||
ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
|
||||
&desc->ptr[6], sg_count, areq->assoclen +
|
||||
cryptlen,
|
||||
tbl_off);
|
||||
if (ret > 1) {
|
||||
tbl_off += ret;
|
||||
edesc->icv_ool = true;
|
||||
sync_needed = true;
|
||||
} else {
|
||||
edesc->icv_ool = false;
|
||||
}
|
||||
} else {
|
||||
edesc->icv_ool = false;
|
||||
talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
|
||||
sg_count, areq->assoclen + cryptlen, tbl_off);
|
||||
}
|
||||
|
||||
/* iv out */
|
||||
@@ -1367,7 +1316,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
|
||||
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
|
||||
if (ret != -EINPROGRESS) {
|
||||
ipsec_esp_unmap(dev, edesc, areq);
|
||||
ipsec_esp_unmap(dev, edesc, areq, encrypt);
|
||||
kfree(edesc);
|
||||
}
|
||||
return ret;
|
||||
@@ -1435,18 +1384,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
||||
* and space for two sets of ICVs (stashed and generated)
|
||||
*/
|
||||
alloc_len = sizeof(struct talitos_edesc);
|
||||
if (src_nents || dst_nents) {
|
||||
if (src_nents || dst_nents || !encrypt) {
|
||||
if (is_sec1)
|
||||
dma_len = (src_nents ? src_len : 0) +
|
||||
(dst_nents ? dst_len : 0);
|
||||
(dst_nents ? dst_len : 0) + authsize;
|
||||
else
|
||||
dma_len = (src_nents + dst_nents + 2) *
|
||||
sizeof(struct talitos_ptr) + authsize * 2;
|
||||
sizeof(struct talitos_ptr) + authsize;
|
||||
alloc_len += dma_len;
|
||||
} else {
|
||||
dma_len = 0;
|
||||
alloc_len += icv_stashing ? authsize : 0;
|
||||
}
|
||||
alloc_len += icv_stashing ? authsize : 0;
|
||||
|
||||
/* if it's an ahash, add space for a second desc next to the first one */
|
||||
if (is_sec1 && !dst)
|
||||
@@ -1466,15 +1415,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
|
||||
edesc->dst_nents = dst_nents;
|
||||
edesc->iv_dma = iv_dma;
|
||||
edesc->dma_len = dma_len;
|
||||
if (dma_len) {
|
||||
void *addr = &edesc->link_tbl[0];
|
||||
|
||||
if (is_sec1 && !dst)
|
||||
addr += sizeof(struct talitos_desc);
|
||||
edesc->dma_link_tbl = dma_map_single(dev, addr,
|
||||
if (dma_len)
|
||||
edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
|
||||
edesc->dma_len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
return edesc;
|
||||
}
|
||||
|
||||
@@ -1485,9 +1430,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
|
||||
unsigned int authsize = crypto_aead_authsize(authenc);
|
||||
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc);
|
||||
unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
|
||||
|
||||
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
|
||||
iv, areq->assoclen, areq->cryptlen,
|
||||
iv, areq->assoclen, cryptlen,
|
||||
authsize, ivsize, icv_stashing,
|
||||
areq->base.flags, encrypt);
|
||||
}
|
||||
@@ -1506,7 +1452,7 @@ static int aead_encrypt(struct aead_request *req)
|
||||
/* set encrypt */
|
||||
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
|
||||
|
||||
return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
|
||||
return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
|
||||
}
|
||||
|
||||
static int aead_decrypt(struct aead_request *req)
|
||||
@@ -1516,17 +1462,15 @@ static int aead_decrypt(struct aead_request *req)
|
||||
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
|
||||
struct talitos_edesc *edesc;
|
||||
struct scatterlist *sg;
|
||||
void *icvdata;
|
||||
|
||||
req->cryptlen -= authsize;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = aead_edesc_alloc(req, req->iv, 1, false);
|
||||
if (IS_ERR(edesc))
|
||||
return PTR_ERR(edesc);
|
||||
|
||||
if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
|
||||
if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
|
||||
(priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
|
||||
((!edesc->src_nents && !edesc->dst_nents) ||
|
||||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
|
||||
|
||||
@@ -1537,24 +1481,20 @@ static int aead_decrypt(struct aead_request *req)
|
||||
|
||||
/* reset integrity check result bits */
|
||||
|
||||
return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
|
||||
return ipsec_esp(edesc, req, false,
|
||||
ipsec_esp_decrypt_hwauth_done);
|
||||
}
|
||||
|
||||
/* Have to check the ICV with software */
|
||||
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
|
||||
|
||||
/* stash incoming ICV for later cmp with ICV generated by the h/w */
|
||||
if (edesc->dma_len)
|
||||
icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
|
||||
edesc->dst_nents + 2];
|
||||
else
|
||||
icvdata = &edesc->link_tbl[0];
|
||||
icvdata = edesc->buf + edesc->dma_len;
|
||||
|
||||
sg = sg_last(req->src, edesc->src_nents ? : 1);
|
||||
sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
|
||||
req->assoclen + req->cryptlen - authsize);
|
||||
|
||||
memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
|
||||
|
||||
return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
|
||||
return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
|
||||
}
|
||||
|
||||
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
|
||||
@@ -1605,6 +1545,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
|
||||
return ablkcipher_setkey(cipher, key, keylen);
|
||||
}
|
||||
|
||||
static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
keylen == AES_KEYSIZE_256)
return ablkcipher_setkey(cipher, key, keylen);

crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

return -EINVAL;
}
|
||||
|
||||
static void common_nonsnoop_unmap(struct device *dev,
|
||||
struct talitos_edesc *edesc,
|
||||
struct ablkcipher_request *areq)
|
||||
@@ -1624,11 +1576,15 @@ static void ablkcipher_done(struct device *dev,
|
||||
int err)
|
||||
{
|
||||
struct ablkcipher_request *areq = context;
|
||||
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
|
||||
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
|
||||
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
|
||||
struct talitos_edesc *edesc;
|
||||
|
||||
edesc = container_of(desc, struct talitos_edesc, desc);
|
||||
|
||||
common_nonsnoop_unmap(dev, edesc, areq);
|
||||
memcpy(areq->info, ctx->iv, ivsize);
|
||||
|
||||
kfree(edesc);
|
||||
|
||||
@@ -1723,6 +1679,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
|
||||
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
|
||||
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
|
||||
struct talitos_edesc *edesc;
|
||||
unsigned int blocksize =
|
||||
crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
|
||||
|
||||
if (!areq->nbytes)
|
||||
return 0;
|
||||
|
||||
if (areq->nbytes % blocksize)
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = ablkcipher_edesc_alloc(areq, true);
|
||||
@@ -1740,6 +1704,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
|
||||
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
|
||||
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
|
||||
struct talitos_edesc *edesc;
|
||||
unsigned int blocksize =
|
||||
crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
|
||||
|
||||
if (!areq->nbytes)
|
||||
return 0;
|
||||
|
||||
if (areq->nbytes % blocksize)
|
||||
return -EINVAL;
|
||||
|
||||
/* allocate extended descriptor */
|
||||
edesc = ablkcipher_edesc_alloc(areq, false);
|
||||
@@ -1759,14 +1731,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
bool is_sec1 = has_ftr_sec1(priv);
|
||||
struct talitos_desc *desc = &edesc->desc;
|
||||
struct talitos_desc *desc2 = desc + 1;
|
||||
struct talitos_desc *desc2 = (struct talitos_desc *)
|
||||
(edesc->buf + edesc->dma_len);
|
||||
|
||||
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
|
||||
if (desc->next_desc &&
|
||||
desc->ptr[5].ptr != desc2->ptr[5].ptr)
|
||||
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
|
||||
|
||||
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
|
||||
if (req_ctx->psrc)
|
||||
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
|
||||
|
||||
/* When using hashctx-in, must unmap it. */
|
||||
if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
|
||||
@@ -1833,7 +1807,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
|
||||
|
||||
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
|
||||
struct ahash_request *areq, unsigned int length,
|
||||
unsigned int offset,
|
||||
void (*callback) (struct device *dev,
|
||||
struct talitos_desc *desc,
|
||||
void *context, int error))
|
||||
@@ -1872,9 +1845,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
|
||||
|
||||
sg_count = edesc->src_nents ?: 1;
|
||||
if (is_sec1 && sg_count > 1)
|
||||
sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
|
||||
edesc->buf + sizeof(struct talitos_desc),
|
||||
length, req_ctx->nbuf);
|
||||
sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
|
||||
else if (length)
|
||||
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
|
||||
DMA_TO_DEVICE);
|
||||
@@ -1887,7 +1858,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
|
||||
DMA_TO_DEVICE);
|
||||
} else {
|
||||
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
|
||||
&desc->ptr[3], sg_count, offset, 0);
|
||||
&desc->ptr[3], sg_count, 0, 0);
|
||||
if (sg_count > 1)
|
||||
sync_needed = true;
|
||||
}
|
||||
@@ -1911,7 +1882,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
|
||||
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
|
||||
|
||||
if (is_sec1 && req_ctx->nbuf && length) {
|
||||
struct talitos_desc *desc2 = desc + 1;
|
||||
struct talitos_desc *desc2 = (struct talitos_desc *)
|
||||
(edesc->buf + edesc->dma_len);
|
||||
dma_addr_t next_desc;
|
||||
|
||||
memset(desc2, 0, sizeof(*desc2));
|
||||
@@ -1932,7 +1904,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
|
||||
DMA_TO_DEVICE);
|
||||
copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
|
||||
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
|
||||
&desc2->ptr[3], sg_count, offset, 0);
|
||||
&desc2->ptr[3], sg_count, 0, 0);
|
||||
if (sg_count > 1)
|
||||
sync_needed = true;
|
||||
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
|
||||
@@ -2043,7 +2015,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
|
||||
struct device *dev = ctx->dev;
|
||||
struct talitos_private *priv = dev_get_drvdata(dev);
|
||||
bool is_sec1 = has_ftr_sec1(priv);
|
||||
int offset = 0;
|
||||
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
|
||||
|
||||
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
|
||||
@@ -2083,6 +2054,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
|
||||
sg_chain(req_ctx->bufsl, 2, areq->src);
|
||||
req_ctx->psrc = req_ctx->bufsl;
|
||||
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
|
||||
int offset;
|
||||
|
||||
if (nbytes_to_hash > blocksize)
|
||||
offset = blocksize - req_ctx->nbuf;
|
||||
else
|
||||
@@ -2095,7 +2068,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
|
||||
sg_copy_to_buffer(areq->src, nents,
|
||||
ctx_buf + req_ctx->nbuf, offset);
|
||||
req_ctx->nbuf += offset;
|
||||
req_ctx->psrc = areq->src;
|
||||
req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
|
||||
offset);
|
||||
} else
|
||||
req_ctx->psrc = areq->src;
|
||||
|
||||
@@ -2135,8 +2109,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
|
||||
if (ctx->keylen && (req_ctx->first || req_ctx->last))
|
||||
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
|
||||
|
||||
return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
|
||||
ahash_done);
|
||||
return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
|
||||
}
|
||||
|
||||
static int ahash_update(struct ahash_request *areq)
|
||||
@@ -2339,7 +2312,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha1),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-"
|
||||
"cbc-aes-talitos",
|
||||
"cbc-aes-talitos-hsna",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2384,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_name = "authenc(hmac(sha1),"
|
||||
"cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha1-"
|
||||
"cbc-3des-talitos",
|
||||
"cbc-3des-talitos-hsna",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2427,7 +2400,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha224),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha224-"
|
||||
"cbc-aes-talitos",
|
||||
"cbc-aes-talitos-hsna",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2472,7 +2445,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_name = "authenc(hmac(sha224),"
|
||||
"cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha224-"
|
||||
"cbc-3des-talitos",
|
||||
"cbc-3des-talitos-hsna",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2515,7 +2488,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(sha256),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-"
|
||||
"cbc-aes-talitos",
|
||||
"cbc-aes-talitos-hsna",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2560,7 +2533,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_name = "authenc(hmac(sha256),"
|
||||
"cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-sha256-"
|
||||
"cbc-3des-talitos",
|
||||
"cbc-3des-talitos-hsna",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2689,7 +2662,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(aes))",
|
||||
.cra_driver_name = "authenc-hmac-md5-"
|
||||
"cbc-aes-talitos",
|
||||
"cbc-aes-talitos-hsna",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2732,7 +2705,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.base = {
|
||||
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
|
||||
.cra_driver_name = "authenc-hmac-md5-"
|
||||
"cbc-3des-talitos",
|
||||
"cbc-3des-talitos-hsna",
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC,
|
||||
},
|
||||
@@ -2760,7 +2733,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablkcipher_aes_setkey,
|
||||
}
|
||||
},
|
||||
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
|
||||
@@ -2777,6 +2750,7 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablkcipher_aes_setkey,
|
||||
}
|
||||
},
|
||||
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
|
||||
@@ -2787,13 +2761,14 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.alg.crypto = {
|
||||
.cra_name = "ctr(aes)",
|
||||
.cra_driver_name = "ctr-aes-talitos",
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_blocksize = 1,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
|
||||
CRYPTO_ALG_ASYNC,
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.setkey = ablkcipher_aes_setkey,
|
||||
}
|
||||
},
|
||||
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
|
||||
@@ -2810,7 +2785,6 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = DES_KEY_SIZE,
|
||||
.max_keysize = DES_KEY_SIZE,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.setkey = ablkcipher_des_setkey,
|
||||
}
|
||||
},
|
||||
@@ -2845,7 +2819,6 @@ static struct talitos_alg_template driver_algs[] = {
|
||||
.cra_ablkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = ablkcipher_des3_setkey,
|
||||
}
|
||||
},
|
||||
@@ -3270,7 +3243,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
|
||||
alg->cra_priority = t_alg->algt.priority;
|
||||
else
|
||||
alg->cra_priority = TALITOS_CRA_PRIORITY;
|
||||
alg->cra_alignmask = 0;
|
||||
if (has_ftr_sec1(priv))
|
||||
alg->cra_alignmask = 3;
|
||||
else
|
||||
alg->cra_alignmask = 0;
|
||||
alg->cra_ctxsize = sizeof(struct talitos_ctx);
|
||||
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
|
||||
|
||||
@@ -3418,7 +3394,7 @@ static int talitos_probe(struct platform_device *ofdev)
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
if (of_device_is_compatible(np, "fsl,sec1.0")) {
|
||||
if (has_ftr_sec1(priv)) {
|
||||
if (priv->num_channels == 1)
|
||||
tasklet_init(&priv->done_task[0], talitos1_done_ch0,
|
||||
(unsigned long)dev);
|
||||
|
@@ -1,31 +1,8 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause */
|
||||
/*
|
||||
* Freescale SEC (talitos) device register and descriptor header defines
|
||||
*
|
||||
* Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#define TALITOS_TIMEOUT 100000
|
||||
@@ -65,6 +42,34 @@ struct talitos_desc {
|
||||
|
||||
#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
|
||||
|
||||
/*
|
||||
* talitos_edesc - s/w-extended descriptor
|
||||
* @src_nents: number of segments in input scatterlist
|
||||
* @dst_nents: number of segments in output scatterlist
|
||||
* @iv_dma: dma address of iv for checking continuity and link table
|
||||
* @dma_len: length of dma mapped link_tbl space
|
||||
* @dma_link_tbl: bus physical address of link_tbl/buf
|
||||
* @desc: h/w descriptor
|
||||
* @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
|
||||
* @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
|
||||
*
|
||||
* if decrypting (with authcheck), or either one of src_nents or dst_nents
|
||||
* is greater than 1, an integrity check value is concatenated to the end
|
||||
* of link_tbl data
|
||||
*/
|
||||
struct talitos_edesc {
|
||||
int src_nents;
|
||||
int dst_nents;
|
||||
dma_addr_t iv_dma;
|
||||
int dma_len;
|
||||
dma_addr_t dma_link_tbl;
|
||||
struct talitos_desc desc;
|
||||
union {
|
||||
struct talitos_ptr link_tbl[0];
|
||||
u8 buf[0];
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* talitos_request - descriptor submission request
|
||||
* @desc: descriptor pointer (kernel virtual)
|
||||
@@ -150,12 +155,6 @@ struct talitos_private {
|
||||
bool rng_registered;
|
||||
};
|
||||
|
||||
extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
|
||||
void (*callback)(struct device *dev,
|
||||
struct talitos_desc *desc,
|
||||
void *context, int error),
|
||||
void *context);
|
||||
|
||||
/* .features flag */
|
||||
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
|
||||
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
|
||||
@@ -170,13 +169,11 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
|
||||
*/
|
||||
static inline bool has_ftr_sec1(struct talitos_private *priv)
{
#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2)
return priv->features & TALITOS_FTR_SEC1 ? true : false;
#elif defined(CONFIG_CRYPTO_DEV_TALITOS1)
return true;
#else
return false;
#endif
if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) &&
IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2))
return priv->features & TALITOS_FTR_SEC1;

return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1);
}
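
IS_ENABLED() evaluates to a compile-time 0 or 1 for both built-in and modular options, so the rewritten helper lets the compiler discard the dead branch without preprocessor conditionals. A sketch of the same idiom with placeholder config symbols (CONFIG_FOO and CONFIG_BAR are not options used by this driver):

static inline bool example_has_feature(u32 features)
{
        /* Both options compiled in: decide at run time from the feature bits. */
        if (IS_ENABLED(CONFIG_FOO) && IS_ENABLED(CONFIG_BAR))
                return features & 0x1;

        /* Otherwise the answer is known at build time. */
        return IS_ENABLED(CONFIG_FOO);
}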
|
||||
/*
|
||||
@@ -412,5 +409,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
|
||||
|
||||
/* link table extent field bits */
|
||||
#define DESC_PTR_LNKTBL_JUMP 0x80
|
||||
#define DESC_PTR_LNKTBL_RETURN 0x02
|
||||
#define DESC_PTR_LNKTBL_RET 0x02
|
||||
#define DESC_PTR_LNKTBL_NEXT 0x01
|
||||
|
@@ -7,64 +7,52 @@
|
||||
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
|
||||
#include "aesp8-ppc.h"
|
||||
|
||||
struct p8_aes_cbc_ctx {
|
||||
struct crypto_sync_skcipher *fallback;
|
||||
struct crypto_skcipher *fallback;
|
||||
struct aes_key enc_key;
|
||||
struct aes_key dec_key;
|
||||
};
|
||||
|
||||
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
|
||||
static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
|
||||
{
|
||||
const char *alg = crypto_tfm_alg_name(tfm);
|
||||
struct crypto_sync_skcipher *fallback;
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
fallback = crypto_alloc_sync_skcipher(alg, 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_skcipher *fallback;
|
||||
|
||||
fallback = crypto_alloc_skcipher("cbc(aes)", 0,
|
||||
CRYPTO_ALG_NEED_FALLBACK |
|
||||
CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(fallback)) {
|
||||
printk(KERN_ERR
|
||||
"Failed to allocate transformation for '%s': %ld\n",
|
||||
alg, PTR_ERR(fallback));
|
||||
pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
|
||||
PTR_ERR(fallback));
|
||||
return PTR_ERR(fallback);
|
||||
}
|
||||
|
||||
crypto_sync_skcipher_set_flags(
|
||||
fallback,
|
||||
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
|
||||
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
|
||||
crypto_skcipher_reqsize(fallback));
|
||||
ctx->fallback = fallback;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
|
||||
static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
if (ctx->fallback) {
|
||||
crypto_free_sync_skcipher(ctx->fallback);
|
||||
ctx->fallback = NULL;
|
||||
}
|
||||
crypto_free_skcipher(ctx->fallback);
|
||||
}
|
||||
|
||||
static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int ret;
|
||||
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
|
||||
ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
|
||||
ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
|
||||
|
||||
return ret ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int ret;
|
||||
struct blkcipher_walk walk;
|
||||
struct p8_aes_cbc_ctx *ctx =
|
||||
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
|
||||
|
||||
if (!crypto_simd_usable()) {
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
|
||||
skcipher_request_set_sync_tfm(req, ctx->fallback);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
ret = crypto_skcipher_encrypt(req);
|
||||
skcipher_request_zero(req);
|
||||
} else {
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
ret = blkcipher_walk_virt(desc, &walk);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
aes_p8_cbc_encrypt(walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
nbytes & AES_BLOCK_MASK,
|
||||
&ctx->enc_key, walk.iv, 1);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
struct skcipher_request *subreq = skcipher_request_ctx(req);
|
||||
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
ret = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
*subreq = *req;
|
||||
skcipher_request_set_tfm(subreq, ctx->fallback);
|
||||
return enc ? crypto_skcipher_encrypt(subreq) :
|
||||
crypto_skcipher_decrypt(subreq);
|
||||
}
|
||||
|
||||
ret = skcipher_walk_virt(&walk, req, false);
|
||||
while ((nbytes = walk.nbytes) != 0) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
aes_p8_cbc_encrypt(walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
round_down(nbytes, AES_BLOCK_SIZE),
|
||||
enc ? &ctx->enc_key : &ctx->dec_key,
|
||||
walk.iv, enc);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
|
||||
ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int p8_aes_cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
int ret;
|
||||
struct blkcipher_walk walk;
|
||||
struct p8_aes_cbc_ctx *ctx =
|
||||
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
|
||||
|
||||
if (!crypto_simd_usable()) {
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
|
||||
skcipher_request_set_sync_tfm(req, ctx->fallback);
|
||||
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
|
||||
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
|
||||
ret = crypto_skcipher_decrypt(req);
|
||||
skcipher_request_zero(req);
|
||||
} else {
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
ret = blkcipher_walk_virt(desc, &walk);
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
aes_p8_cbc_encrypt(walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
nbytes & AES_BLOCK_MASK,
|
||||
&ctx->dec_key, walk.iv, 0);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
ret = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
return p8_aes_cbc_crypt(req, 1);
|
||||
}
|
||||
|
||||
static int p8_aes_cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return p8_aes_cbc_crypt(req, 0);
|
||||
}
|
||||
|
||||
struct crypto_alg p8_aes_cbc_alg = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "p8_aes_cbc",
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_priority = 2000,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_alignmask = 0,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
|
||||
.cra_init = p8_aes_cbc_init,
|
||||
.cra_exit = p8_aes_cbc_exit,
|
||||
.cra_blkcipher = {
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = p8_aes_cbc_setkey,
|
||||
.encrypt = p8_aes_cbc_encrypt,
|
||||
.decrypt = p8_aes_cbc_decrypt,
|
||||
},
|
||||
struct skcipher_alg p8_aes_cbc_alg = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "p8_aes_cbc",
.base.cra_module = THIS_MODULE,
.base.cra_priority = 2000,
.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
.setkey = p8_aes_cbc_setkey,
.encrypt = p8_aes_cbc_encrypt,
.decrypt = p8_aes_cbc_decrypt,
.init = p8_aes_cbc_init,
.exit = p8_aes_cbc_exit,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
};
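
Since p8_aes_cbc is now a struct skcipher_alg rather than a blkcipher-style crypto_alg, it is registered through the skcipher API. A minimal sketch of the registration calls (the vmx module's real init/exit path, which registers several algorithms at once, is not part of this hunk; example_init/example_exit are illustrative names):

static int __init example_init(void)
{
        return crypto_register_skcipher(&p8_aes_cbc_alg);
}

static void __exit example_exit(void)
{
        crypto_unregister_skcipher(&p8_aes_cbc_alg);
}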
|
||||
|
@@ -7,62 +7,51 @@
  * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
  */

 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
 #include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
 #include <crypto/internal/simd.h>
 #include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>

 #include "aesp8-ppc.h"

 struct p8_aes_ctr_ctx {
-    struct crypto_sync_skcipher *fallback;
+    struct crypto_skcipher *fallback;
     struct aes_key enc_key;
 };

-static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
 {
-    const char *alg = crypto_tfm_alg_name(tfm);
-    struct crypto_sync_skcipher *fallback;
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+    struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+    struct crypto_skcipher *fallback;

-    fallback = crypto_alloc_sync_skcipher(alg, 0,
-                                          CRYPTO_ALG_NEED_FALLBACK);
+    fallback = crypto_alloc_skcipher("ctr(aes)", 0,
+                                     CRYPTO_ALG_NEED_FALLBACK |
+                                     CRYPTO_ALG_ASYNC);
     if (IS_ERR(fallback)) {
-        printk(KERN_ERR
-               "Failed to allocate transformation for '%s': %ld\n",
-               alg, PTR_ERR(fallback));
+        pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
+               PTR_ERR(fallback));
         return PTR_ERR(fallback);
     }

-    crypto_sync_skcipher_set_flags(
-        fallback,
-        crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+    crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+                                crypto_skcipher_reqsize(fallback));
     ctx->fallback = fallback;

     return 0;
 }

-static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
 {
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+    struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);

-    if (ctx->fallback) {
-        crypto_free_sync_skcipher(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+    crypto_free_skcipher(ctx->fallback);
 }

-static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
 {
+    struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
     int ret;
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

     preempt_disable();
     pagefault_disable();
@@ -72,13 +61,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
     pagefault_enable();
     preempt_enable();

-    ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+    ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);

     return ret ? -EINVAL : 0;
 }

-static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
-                             struct blkcipher_walk *walk)
+static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
+                             struct skcipher_walk *walk)
 {
     u8 *ctrblk = walk->iv;
     u8 keystream[AES_BLOCK_SIZE];
@@ -98,77 +87,63 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
     crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }

-static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
-                            struct scatterlist *dst,
-                            struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_ctr_crypt(struct skcipher_request *req)
 {
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+    struct skcipher_walk walk;
+    unsigned int nbytes;
     int ret;
-    u64 inc;
-    struct blkcipher_walk walk;
-    struct p8_aes_ctr_ctx *ctx =
-        crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

     if (!crypto_simd_usable()) {
-        SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-        skcipher_request_set_sync_tfm(req, ctx->fallback);
-        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-        ret = crypto_skcipher_encrypt(req);
-        skcipher_request_zero(req);
-    } else {
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-            preempt_disable();
-            pagefault_disable();
-            enable_kernel_vsx();
-            aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
-                                        walk.dst.virt.addr,
-                                        (nbytes &
-                                         AES_BLOCK_MASK) /
-                                        AES_BLOCK_SIZE,
-                                        &ctx->enc_key,
-                                        walk.iv);
-            disable_kernel_vsx();
-            pagefault_enable();
-            preempt_enable();
+        struct skcipher_request *subreq = skcipher_request_ctx(req);

-            /* We need to update IV mostly for last bytes/round */
-            inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
-            if (inc > 0)
-                while (inc--)
-                    crypto_inc(walk.iv, AES_BLOCK_SIZE);
-            nbytes &= AES_BLOCK_SIZE - 1;
-            ret = blkcipher_walk_done(desc, &walk, nbytes);
-        }
-        if (walk.nbytes) {
-            p8_aes_ctr_final(ctx, &walk);
-            ret = blkcipher_walk_done(desc, &walk, 0);
-        }
+        *subreq = *req;
+        skcipher_request_set_tfm(subreq, ctx->fallback);
+        return crypto_skcipher_encrypt(subreq);
     }

+    ret = skcipher_walk_virt(&walk, req, false);
+    while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+        preempt_disable();
+        pagefault_disable();
+        enable_kernel_vsx();
+        aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                    walk.dst.virt.addr,
+                                    nbytes / AES_BLOCK_SIZE,
+                                    &ctx->enc_key, walk.iv);
+        disable_kernel_vsx();
+        pagefault_enable();
+        preempt_enable();
+
+        do {
+            crypto_inc(walk.iv, AES_BLOCK_SIZE);
+        } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
+
+        ret = skcipher_walk_done(&walk, nbytes);
+    }
+    if (nbytes) {
+        p8_aes_ctr_final(ctx, &walk);
+        ret = skcipher_walk_done(&walk, 0);
+    }
     return ret;
 }

-struct crypto_alg p8_aes_ctr_alg = {
-    .cra_name = "ctr(aes)",
-    .cra_driver_name = "p8_aes_ctr",
-    .cra_module = THIS_MODULE,
-    .cra_priority = 2000,
-    .cra_type = &crypto_blkcipher_type,
-    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-    .cra_alignmask = 0,
-    .cra_blocksize = 1,
-    .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
-    .cra_init = p8_aes_ctr_init,
-    .cra_exit = p8_aes_ctr_exit,
-    .cra_blkcipher = {
-        .ivsize = AES_BLOCK_SIZE,
-        .min_keysize = AES_MIN_KEY_SIZE,
-        .max_keysize = AES_MAX_KEY_SIZE,
-        .setkey = p8_aes_ctr_setkey,
-        .encrypt = p8_aes_ctr_crypt,
-        .decrypt = p8_aes_ctr_crypt,
-    },
+struct skcipher_alg p8_aes_ctr_alg = {
+    .base.cra_name = "ctr(aes)",
+    .base.cra_driver_name = "p8_aes_ctr",
+    .base.cra_module = THIS_MODULE,
+    .base.cra_priority = 2000,
+    .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+    .base.cra_blocksize = 1,
+    .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+    .setkey = p8_aes_ctr_setkey,
+    .encrypt = p8_aes_ctr_crypt,
+    .decrypt = p8_aes_ctr_crypt,
+    .init = p8_aes_ctr_init,
+    .exit = p8_aes_ctr_exit,
+    .min_keysize = AES_MIN_KEY_SIZE,
+    .max_keysize = AES_MAX_KEY_SIZE,
+    .ivsize = AES_BLOCK_SIZE,
+    .chunksize = AES_BLOCK_SIZE,
 };

@@ -7,67 +7,56 @@
  * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
  */

 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
 #include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
 #include <crypto/internal/simd.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
-#include <crypto/skcipher.h>

 #include "aesp8-ppc.h"

 struct p8_aes_xts_ctx {
-    struct crypto_sync_skcipher *fallback;
+    struct crypto_skcipher *fallback;
     struct aes_key enc_key;
     struct aes_key dec_key;
     struct aes_key tweak_key;
 };

-static int p8_aes_xts_init(struct crypto_tfm *tfm)
+static int p8_aes_xts_init(struct crypto_skcipher *tfm)
 {
-    const char *alg = crypto_tfm_alg_name(tfm);
-    struct crypto_sync_skcipher *fallback;
-    struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+    struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+    struct crypto_skcipher *fallback;

-    fallback = crypto_alloc_sync_skcipher(alg, 0,
-                                          CRYPTO_ALG_NEED_FALLBACK);
+    fallback = crypto_alloc_skcipher("xts(aes)", 0,
+                                     CRYPTO_ALG_NEED_FALLBACK |
+                                     CRYPTO_ALG_ASYNC);
     if (IS_ERR(fallback)) {
-        printk(KERN_ERR
-               "Failed to allocate transformation for '%s': %ld\n",
-               alg, PTR_ERR(fallback));
+        pr_err("Failed to allocate xts(aes) fallback: %ld\n",
+               PTR_ERR(fallback));
         return PTR_ERR(fallback);
     }

-    crypto_sync_skcipher_set_flags(
-        fallback,
-        crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+    crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+                                crypto_skcipher_reqsize(fallback));
     ctx->fallback = fallback;

     return 0;
 }

-static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
 {
-    struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+    struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

-    if (ctx->fallback) {
-        crypto_free_sync_skcipher(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+    crypto_free_skcipher(ctx->fallback);
 }

-static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
 {
+    struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
     int ret;
-    struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

-    ret = xts_check_key(tfm, key, keylen);
+    ret = xts_verify_key(tfm, key, keylen);
     if (ret)
         return ret;

@@ -81,100 +70,90 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
     pagefault_enable();
     preempt_enable();

-    ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+    ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);

     return ret ? -EINVAL : 0;
 }

-static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
-                            struct scatterlist *dst,
-                            struct scatterlist *src,
-                            unsigned int nbytes, int enc)
+static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
 {
-    int ret;
+    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+    const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+    struct skcipher_walk walk;
+    unsigned int nbytes;
     u8 tweak[AES_BLOCK_SIZE];
-    u8 *iv;
-    struct blkcipher_walk walk;
-    struct p8_aes_xts_ctx *ctx =
-        crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+    int ret;

     if (!crypto_simd_usable()) {
-        SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-        skcipher_request_set_sync_tfm(req, ctx->fallback);
-        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-        ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
-        skcipher_request_zero(req);
-    } else {
-        blkcipher_walk_init(&walk, dst, src, nbytes);
+        struct skcipher_request *subreq = skcipher_request_ctx(req);

-        ret = blkcipher_walk_virt(desc, &walk);
+        *subreq = *req;
+        skcipher_request_set_tfm(subreq, ctx->fallback);
+        return enc ? crypto_skcipher_encrypt(subreq) :
+                     crypto_skcipher_decrypt(subreq);
     }

+    ret = skcipher_walk_virt(&walk, req, false);
+    if (ret)
+        return ret;
+
+    preempt_disable();
+    pagefault_disable();
+    enable_kernel_vsx();
+
+    aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
+
+    disable_kernel_vsx();
+    pagefault_enable();
+    preempt_enable();
+
+    while ((nbytes = walk.nbytes) != 0) {
         preempt_disable();
         pagefault_disable();
         enable_kernel_vsx();
-        iv = walk.iv;
-        memset(tweak, 0, AES_BLOCK_SIZE);
-        aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+        if (enc)
+            aes_p8_xts_encrypt(walk.src.virt.addr,
+                               walk.dst.virt.addr,
+                               round_down(nbytes, AES_BLOCK_SIZE),
+                               &ctx->enc_key, NULL, tweak);
+        else
+            aes_p8_xts_decrypt(walk.src.virt.addr,
+                               walk.dst.virt.addr,
+                               round_down(nbytes, AES_BLOCK_SIZE),
+                               &ctx->dec_key, NULL, tweak);
         disable_kernel_vsx();
         pagefault_enable();
         preempt_enable();

-        while ((nbytes = walk.nbytes)) {
-            preempt_disable();
-            pagefault_disable();
-            enable_kernel_vsx();
-            if (enc)
-                aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                        nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
-            else
-                aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                        nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
-            disable_kernel_vsx();
-            pagefault_enable();
-            preempt_enable();
-
-            nbytes &= AES_BLOCK_SIZE - 1;
-            ret = blkcipher_walk_done(desc, &walk, nbytes);
-        }
+        ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
     }
     return ret;
 }

-static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
-                              struct scatterlist *dst,
-                              struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_encrypt(struct skcipher_request *req)
 {
-    return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+    return p8_aes_xts_crypt(req, 1);
 }

-static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
-                              struct scatterlist *dst,
-                              struct scatterlist *src, unsigned int nbytes)
+static int p8_aes_xts_decrypt(struct skcipher_request *req)
 {
-    return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+    return p8_aes_xts_crypt(req, 0);
 }

-struct crypto_alg p8_aes_xts_alg = {
-    .cra_name = "xts(aes)",
-    .cra_driver_name = "p8_aes_xts",
-    .cra_module = THIS_MODULE,
-    .cra_priority = 2000,
-    .cra_type = &crypto_blkcipher_type,
-    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-    .cra_alignmask = 0,
-    .cra_blocksize = AES_BLOCK_SIZE,
-    .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
-    .cra_init = p8_aes_xts_init,
-    .cra_exit = p8_aes_xts_exit,
-    .cra_blkcipher = {
-        .ivsize = AES_BLOCK_SIZE,
-        .min_keysize = 2 * AES_MIN_KEY_SIZE,
-        .max_keysize = 2 * AES_MAX_KEY_SIZE,
-        .setkey = p8_aes_xts_setkey,
-        .encrypt = p8_aes_xts_encrypt,
-        .decrypt = p8_aes_xts_decrypt,
-    }
+struct skcipher_alg p8_aes_xts_alg = {
+    .base.cra_name = "xts(aes)",
+    .base.cra_driver_name = "p8_aes_xts",
+    .base.cra_module = THIS_MODULE,
+    .base.cra_priority = 2000,
+    .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+    .base.cra_blocksize = AES_BLOCK_SIZE,
+    .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+    .setkey = p8_aes_xts_setkey,
+    .encrypt = p8_aes_xts_encrypt,
+    .decrypt = p8_aes_xts_decrypt,
+    .init = p8_aes_xts_init,
+    .exit = p8_aes_xts_exit,
+    .min_keysize = 2 * AES_MIN_KEY_SIZE,
+    .max_keysize = 2 * AES_MAX_KEY_SIZE,
+    .ivsize = AES_BLOCK_SIZE,
 };

@@ -2,8 +2,6 @@
 #include <linux/types.h>
 #include <crypto/aes.h>

-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-
 struct aes_key {
     u8 key[AES_MAX_KEYLENGTH];
     int rounds;

@@ -1286,6 +1286,24 @@ ___
 #########################################################################
 {{{ # CTR procedure[s] #

+####################### WARNING: Here be dragons! #######################
+#
+# This code is written as 'ctr32', based on a 32-bit counter used
+# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
+# a 128-bit counter.
+#
+# This leads to subtle changes from the upstream code: the counter
+# is incremented with vaddu_q_m rather than vaddu_w_m. This occurs in
+# both the bulk (8 blocks at a time) path, and in the individual block
+# path. Be aware of this when doing updates.
+#
+# See:
+# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
+# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
+# https://github.com/openssl/openssl/pull/8942
+#
+#########################################################################
 my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
 my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
 my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=

@@ -1357,7 +1375,7 @@ Loop_ctr32_enc:
     addi $idx,$idx,16
     bdnz Loop_ctr32_enc

-    vadduqm $ivec,$ivec,$one
+    vadduqm $ivec,$ivec,$one        # Kernel change for 128-bit
     vmr $dat,$inptail
     lvx $inptail,0,$inp
     addi $inp,$inp,16

@@ -1501,7 +1519,7 @@ Load_ctr32_enc_key:
     $SHL $len,$len,4

     vadduqm $out1,$ivec,$one        # counter values ...
-    vadduqm $out2,$ivec,$two
+    vadduqm $out2,$ivec,$two        # (do all ctr adds as 128-bit)
     vxor $out0,$ivec,$rndkey0       # ... xored with rndkey[0]
     le?li $idx,8
     vadduqm $out3,$out1,$two

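The counter-width quirk the warning above describes can be seen with a small standalone illustration. The following is a minimal userspace C sketch, not kernel code; the helper names ctr32_inc and ctr128_inc are invented here. ctr32_inc mimics the upstream behaviour of advancing only the low 32 bits of the IV, while ctr128_inc mimics the kernel convention (what crypto_inc() does for a 16-byte IV), carrying across all 128 bits, which is why the assembly must use the quadword add vadduqm rather than the word add vadduwm.

/*
 * Illustration only (plain userspace C, not part of the commit):
 * 32-bit vs 128-bit big-endian counter increment for a 16-byte CTR IV.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK 16

/* Upstream-style: only bytes 12..15 ever change; no carry into byte 11. */
static void ctr32_inc(uint8_t ctr[BLOCK])
{
    int i;

    for (i = BLOCK - 1; i >= BLOCK - 4; i--)
        if (++ctr[i] != 0)
            return;
}

/* Kernel-style: the carry may ripple all the way to byte 0. */
static void ctr128_inc(uint8_t ctr[BLOCK])
{
    int i;

    for (i = BLOCK - 1; i >= 0; i--)
        if (++ctr[i] != 0)
            return;
}

int main(void)
{
    /* IV whose low 32 bits are one step away from wrapping. */
    uint8_t a[BLOCK] = { [11] = 0x01, [12] = 0xff, [13] = 0xff,
                         [14] = 0xff, [15] = 0xff };
    uint8_t b[BLOCK];

    memcpy(b, a, sizeof(a));
    ctr32_inc(a);   /* byte 11 stays 0x01, low word wraps to 0 */
    ctr128_inc(b);  /* carry ripples: byte 11 becomes 0x02 */

    printf("32-bit : byte11=%02x low=%02x%02x%02x%02x\n",
           a[11], a[12], a[13], a[14], a[15]);
    printf("128-bit: byte11=%02x low=%02x%02x%02x%02x\n",
           b[11], b[12], b[13], b[14], b[15]);
    return 0;
}

Run, the 32-bit variant leaves byte 11 at 0x01 while the 128-bit variant carries into it and prints 0x02; that divergence is exactly what 1d4aa0b4c181 and 009b30ac7444 address by doing every counter add as a quadword.
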
@@ -15,54 +15,58 @@
 #include <linux/crypto.h>
 #include <asm/cputable.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>

 extern struct shash_alg p8_ghash_alg;
 extern struct crypto_alg p8_aes_alg;
-extern struct crypto_alg p8_aes_cbc_alg;
-extern struct crypto_alg p8_aes_ctr_alg;
-extern struct crypto_alg p8_aes_xts_alg;
-static struct crypto_alg *algs[] = {
-    &p8_aes_alg,
-    &p8_aes_cbc_alg,
-    &p8_aes_ctr_alg,
-    &p8_aes_xts_alg,
-    NULL,
-};
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;

 static int __init p8_init(void)
 {
-    int ret = 0;
-    struct crypto_alg **alg_it;
+    int ret;

-    for (alg_it = algs; *alg_it; alg_it++) {
-        ret = crypto_register_alg(*alg_it);
-        printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
-               (*alg_it)->cra_name, ret);
-        if (ret) {
-            for (alg_it--; alg_it >= algs; alg_it--)
-                crypto_unregister_alg(*alg_it);
-            break;
-        }
-    }
-    if (ret)
-        return ret;
-
     ret = crypto_register_shash(&p8_ghash_alg);
-    if (ret) {
-        for (alg_it = algs; *alg_it; alg_it++)
-            crypto_unregister_alg(*alg_it);
-    }
+    if (ret)
+        goto err;
+
+    ret = crypto_register_alg(&p8_aes_alg);
+    if (ret)
+        goto err_unregister_ghash;
+
+    ret = crypto_register_skcipher(&p8_aes_cbc_alg);
+    if (ret)
+        goto err_unregister_aes;
+
+    ret = crypto_register_skcipher(&p8_aes_ctr_alg);
+    if (ret)
+        goto err_unregister_aes_cbc;
+
+    ret = crypto_register_skcipher(&p8_aes_xts_alg);
+    if (ret)
+        goto err_unregister_aes_ctr;
+
+    return 0;
+
+err_unregister_aes_ctr:
+    crypto_unregister_skcipher(&p8_aes_ctr_alg);
+err_unregister_aes_cbc:
+    crypto_unregister_skcipher(&p8_aes_cbc_alg);
+err_unregister_aes:
+    crypto_unregister_alg(&p8_aes_alg);
+err_unregister_ghash:
+    crypto_unregister_shash(&p8_ghash_alg);
+err:
     return ret;
 }

 static void __exit p8_exit(void)
 {
-    struct crypto_alg **alg_it;
-
-    for (alg_it = algs; *alg_it; alg_it++) {
-        printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
-        crypto_unregister_alg(*alg_it);
-    }
+    crypto_unregister_skcipher(&p8_aes_xts_alg);
+    crypto_unregister_skcipher(&p8_aes_ctr_alg);
+    crypto_unregister_skcipher(&p8_aes_cbc_alg);
+    crypto_unregister_alg(&p8_aes_alg);
     crypto_unregister_shash(&p8_ghash_alg);
 }