Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Remove VLA usage
   - Add cryptostat user-space interface
   - Add notifier for new crypto algorithms

  Algorithms:
   - Add OFB mode
   - Remove speck

  Drivers:
   - Remove x86/sha*-mb as they are buggy
   - Remove pcbc(aes) from x86/aesni
   - Improve performance of arm/ghash-ce by up to 85%
   - Implement CTS-CBC in arm64/aes-blk, faster by up to 50%
   - Remove PMULL based arm64/crc32 driver
   - Use PMULL in arm64/crct10dif
   - Add aes-ctr support in s5p-sss
   - Add caam/qi2 driver

  Others:
   - Pick better transform if one becomes available in crc-t10dif"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: chelsio - Update ntx queue received from cxgb4
  crypto: ccree - avoid implicit enum conversion
  crypto: caam - add SPDX license identifier to all files
  crypto: caam/qi - simplify CGR allocation, freeing
  crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
  crypto: arm64/aes-blk - ensure XTS mask is always loaded
  crypto: testmgr - fix sizeof() on COMP_BUF_SIZE
  crypto: chtls - remove set but not used variable 'csk'
  crypto: axis - fix platform_no_drv_owner.cocci warnings
  crypto: x86/aes-ni - fix build error following fpu template removal
  crypto: arm64/aes - fix handling sub-block CTS-CBC inputs
  crypto: caam/qi2 - avoid double export
  crypto: mxs-dcp - Fix AES issues
  crypto: mxs-dcp - Fix SHA null hashes and output length
  crypto: mxs-dcp - Implement sha import/export
  crypto: aegis/generic - fix for big endian systems
  crypto: morus/generic - fix for big endian systems
  crypto: lrw - fix rebase error after out of bounds fix
  crypto: cavium/nitrox - use pci_alloc_irq_vectors() while enabling MSI-X.
  crypto: cavium/nitrox - NITROX command queue changes.
  ...
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -6,10 +7,6 @@
  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
  * Author: Nicolas Royer <nicolas@eukrea.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
  * Some ideas are from omap-aes.c driver.
  */
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
  *
@@ -5,18 +6,6 @@
  *
  * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
  */
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Microchip / Atmel ECC (I2C) driver.
  *
  * Copyright (c) 2017, Microchip Technology Inc.
  * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
 */

 #include <linux/bitrev.h>
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2017, Microchip Technology Inc.
  * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
 */

 #ifndef __ATMEL_ECC_H__
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -6,10 +7,6 @@
  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
  * Author: Nicolas Royer <nicolas@eukrea.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
  * Some ideas are from omap-sham.c drivers.
  */
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Cryptographic API.
  *
@@ -6,10 +7,6 @@
  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
  * Author: Nicolas Royer <nicolas@eukrea.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
  * Some ideas are from omap-aes.c drivers.
  */
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
 	size_t key_length;
 	u32 key_md;
 	int crypto_type;
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 };

 struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
 		pr_debug("counter %x will overflow (nblks %u), falling back\n",
 			 counter, counter + nblks);

-		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
-					     ctx->key_length);
+		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+						  ctx->key_length);
 		if (ret)
 			return ret;

 		{
-			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-			skcipher_request_set_tfm(subreq, ctx->fallback);
+			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 			skcipher_request_set_callback(subreq, req->base.flags,
 						      NULL, NULL);
 			skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
 {
 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

-	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
-					      0,
-					      CRYPTO_ALG_ASYNC |
-					      CRYPTO_ALG_NEED_FALLBACK);
+	ctx->fallback =
+		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+					   0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ctx->fallback))
 		return PTR_ERR(ctx->fallback);

@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
 {
 	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

-	crypto_free_skcipher(ctx->fallback);
+	crypto_free_sync_skcipher(ctx->fallback);
 	artpec6_crypto_aes_exit(tfm);
 }

@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
 	.remove = artpec6_crypto_remove,
 	.driver = {
 		.name = "artpec6-crypto",
-		.owner = THIS_MODULE,
 		.of_match_table = artpec6_crypto_of_match,
 	},
 };
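The artpec6 hunks above are part of this release's VLA removal: the AES-CTR fallback becomes a crypto_sync_skcipher, whose request may legally live on the stack because the transform can never be asynchronous. A minimal sketch of that pattern, assuming a hypothetical demo_ctx outside this driver (demo_* names are illustrative, error handling trimmed):

/* Sketch of the sync-skcipher fallback pattern; not code from this diff. */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

struct demo_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int demo_init(struct demo_ctx *ctx, const char *alg_name)
{
	/* No CRYPTO_ALG_ASYNC mask needed: a sync tfm is synchronous by type */
	ctx->fallback = crypto_alloc_sync_skcipher(alg_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int demo_fallback_encrypt(struct demo_ctx *ctx, const u8 *key,
				 unsigned int keylen, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int nbytes,
				 u8 *iv)
{
	int ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	if (ret)
		return ret;

	{
		/* Fixed-size, stack-allocated request: this is what removes the VLA */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, 0, NULL, NULL);
		skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
		ret = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
	}

	return ret;
}

static void demo_exit(struct demo_ctx *ctx)
{
	crypto_free_sync_skcipher(ctx->fallback);
}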
@@ -1,7 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+	tristate
+
 config CRYPTO_DEV_FSL_CAAM
-	tristate "Freescale CAAM-Multicore driver backend"
+	tristate "Freescale CAAM-Multicore platform driver backend"
 	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
 	select SOC_BUS
+	select CRYPTO_DEV_FSL_CAAM_COMMON
 	help
 	  Enables the driver module for Freescale's Cryptographic Accelerator
 	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
 	  To compile this driver as a module, choose M here: the module
 	  will be called caam.

+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+	bool "Enable debug output in CAAM driver"
+	help
+	  Selecting this will enable printing of various debug
+	  information in the CAAM driver.
+
 config CRYPTO_DEV_FSL_CAAM_JR
 	tristate "Freescale CAAM Job Ring driver backend"
-	depends on CRYPTO_DEV_FSL_CAAM
 	default y
 	help
 	  Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
 	  To compile this driver as a module, choose M here: the module
 	  will be called caam_jr.

+if CRYPTO_DEV_FSL_CAAM_JR
+
 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
 	int "Job Ring size"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	range 2 9
 	default "9"
 	help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE

 config CRYPTO_DEV_FSL_CAAM_INTC
 	bool "Job Ring interrupt coalescing"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	help
 	  Enable the Job Ring's interrupt coalescing feature.

@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD

 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	tristate "Register algorithm implementations with the Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API

 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
 	tristate "Queue Interface as Crypto API backend"
-	depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+	depends on FSL_DPAA && NET
 	default y
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI

 config CRYPTO_DEV_FSL_CAAM_AHASH_API
 	tristate "Register hash algorithm implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_HASH
 	help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API

 config CRYPTO_DEV_FSL_CAAM_PKC_API
 	tristate "Register public key cryptography implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RSA
 	help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API

 config CRYPTO_DEV_FSL_CAAM_RNG_API
 	tristate "Register caam device for hwrng API"
-	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RNG
 	select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
 	  To compile this as a module, choose M here: the module
 	  will be called caamrng.

-config CRYPTO_DEV_FSL_CAAM_DEBUG
-	bool "Enable debug output in CAAM driver"
-	depends on CRYPTO_DEV_FSL_CAAM
-	help
-	  Selecting this will enable printing of various debug
-	  information in the CAAM driver.
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+	tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+	depends on FSL_MC_DPIO
+	depends on NETDEVICES
+	select CRYPTO_DEV_FSL_CAAM_COMMON
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AUTHENC
+	select CRYPTO_AEAD
+	select CRYPTO_HASH
+	help
+	  CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+	  It handles DPSECI DPAA2 objects that sit on the Management Complex
+	  (MC) fsl-mc bus.
+
+	  To compile this as a module, choose M here: the module
+	  will be called dpaa2_caam.

 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
 	def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
-		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+		      CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+		      CRYPTO_DEV_FSL_DPAA2_CAAM)
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+	def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
+		      CRYPTO_DEV_FSL_DPAA2_CAAM)
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
 	ccflags-y := -DDEBUG
 endif

+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o

 caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
 caam_pkc-y := caampkc.o pkc_desc.o
 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
 	ccflags-y += -DCONFIG_CAAM_QI
 	caam-objs += qi.o
 endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
(File diff suppressed because it is too large)
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
  *
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  */

 #include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
 }
 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);

-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
 {
 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
 }

 /**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
  * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  * @ctx1_iv_off: IV offset in CONTEXT1 register
  */
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
-				  unsigned int ivsize, const bool is_rfc3686,
-				  const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+				unsigned int ivsize, const bool is_rfc3686,
+				const u32 ctx1_iv_off)
 {
 	u32 *key_jump_cmd;

@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
 			      OP_ALG_ENCRYPT);

 	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	skcipher_append_src_dst(desc);

 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
-		       "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+		       "skcipher enc shdesc@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 }
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);

 /**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
  * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
  * @ctx1_iv_off: IV offset in CONTEXT1 register
  */
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
-				  unsigned int ivsize, const bool is_rfc3686,
-				  const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+				unsigned int ivsize, const bool is_rfc3686,
+				const u32 ctx1_iv_off)
 {
 	u32 *key_jump_cmd;

@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
 	append_dec_op1(desc, cdata->algtype);

 	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	skcipher_append_src_dst(desc);

 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
-		       "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+		       "skcipher dec shdesc@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 }
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
-
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- *                                   with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- *         with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
-				     unsigned int ivsize, const bool is_rfc3686,
-				     const u32 ctx1_iv_off)
-{
-	u32 *key_jump_cmd, geniv;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
-			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-	/* Load Nonce into CONTEXT1 reg */
-	if (is_rfc3686) {
-		const u8 *nonce = cdata->key_virt + cdata->keylen;
-
-		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-				   LDST_CLASS_IND_CCB |
-				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
-			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
-			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-	}
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Generate IV */
-	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
-		(ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
-		    MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
-		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-	/* Copy generated IV to memory */
-	append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	if (ctx1_iv_off)
-		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
-			    (1 << JUMP_OFFSET_SHIFT));
-
-	/* Load operation */
-	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
-			 OP_ALG_ENCRYPT);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);

 /**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- *                                    descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
  */
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
 {
 	__be64 sector_size = cpu_to_be64(512);
 	u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
 			      OP_ALG_ENCRYPT);

 	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	skcipher_append_src_dst(desc);

 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
-		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+		       "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 }
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);

 /**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- *                                    descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
  * @desc: pointer to buffer used for descriptor construction
  * @cdata: pointer to block cipher transform definitions
  *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
  */
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
 {
 	__be64 sector_size = cpu_to_be64(512);
 	u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
 	append_dec_op1(desc, cdata->algtype);

 	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	skcipher_append_src_dst(desc);

 #ifdef DEBUG
 	print_hex_dump(KERN_ERR,
-		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+		       "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 }
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);

 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("FSL CAAM descriptor support");
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
  *
  * Copyright 2016 NXP
  */
@@ -42,10 +42,10 @@
 #define DESC_QI_RFC4543_ENC_LEN	(DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
 #define DESC_QI_RFC4543_DEC_LEN	(DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)

-#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN	(DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN	(DESC_SKCIPHER_BASE + \
 					 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN	(DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_DEC_LEN	(DESC_SKCIPHER_BASE + \
 					 15 * CAAM_CMD_SZ)

 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
 			       unsigned int ivsize, unsigned int icvsize,
 			       const bool is_qi);

-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
-				  unsigned int ivsize, const bool is_rfc3686,
-				  const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+				unsigned int ivsize, const bool is_rfc3686,
+				const u32 ctx1_iv_off);

-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
-				  unsigned int ivsize, const bool is_rfc3686,
-				  const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+				unsigned int ivsize, const bool is_rfc3686,
+				const u32 ctx1_iv_off);

-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
-				     unsigned int ivsize, const bool is_rfc3686,
-				     const u32 ctx1_iv_off);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);

-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
-
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);

 #endif /* _CAAMALG_DESC_H_ */
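The caamalg.c and caamalg_qi.c diffs that consume these renamed helpers are suppressed in this view, so as orientation only, here is a hedged sketch of a caller building a cbc(aes) encrypt shared descriptor. demo_build_cbc_aes_enc is hypothetical; the alginfo fields follow caam's desc_constr.h:

/* Illustrative caller; not taken from the suppressed caamalg.c diff. */
#include "desc_constr.h"
#include "caamalg_desc.h"

static void demo_build_cbc_aes_enc(u32 *desc, const u8 *key, unsigned int keylen)
{
	struct alginfo cdata = {
		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.keylen = keylen,
		.key_virt = key,
		.key_inline = true,
	};

	/*
	 * cbc(aes): 16-byte IV, not rfc3686-wrapped, IV at CONTEXT1 offset 0;
	 * desc must hold at least DESC_SKCIPHER_ENC_LEN bytes
	 */
	cnstr_shdsc_skcipher_encap(desc, &cdata, 16, false, 0);
}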
(File diff suppressed because it is too large)

drivers/crypto/caam/caamalg_qi2.c (new file, 5165 lines; diff suppressed because it is too large)

drivers/crypto/caam/caamalg_qi2.h (new file, 223 lines)
@@ -0,0 +1,223 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#ifndef _CAAMALG_QI2_H_
#define _CAAMALG_QI2_H_

#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/threads.h>
#include "dpseci.h"
#include "desc_constr.h"

#define DPAA2_CAAM_STORE_SIZE	16
/* NAPI weight *must* be a multiple of the store size. */
#define DPAA2_CAAM_NAPI_WEIGHT	64

/* The congestion entrance threshold was chosen so that on LS2088
 * we support the maximum throughput for the available memory
 */
#define DPAA2_SEC_CONG_ENTRY_THRESH	(128 * 1024 * 1024)
#define DPAA2_SEC_CONG_EXIT_THRESH	(DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)

/**
 * dpaa2_caam_priv - driver private data
 * @dpseci_id: DPSECI object unique ID
 * @major_ver: DPSECI major version
 * @minor_ver: DPSECI minor version
 * @dpseci_attr: DPSECI attributes
 * @sec_attr: SEC engine attributes
 * @rx_queue_attr: array of Rx queue attributes
 * @tx_queue_attr: array of Tx queue attributes
 * @cscn_mem: pointer to memory region containing the congestion SCN;
 *	its size is larger than needed, to accommodate alignment
 * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
 *	PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
 * @cscn_dma: dma address used by the QMAN to write CSCN messages
 * @dev: device associated with the DPSECI object
 * @mc_io: pointer to MC portal's I/O object
 * @domain: IOMMU domain
 * @ppriv: per CPU pointers to private data
 */
struct dpaa2_caam_priv {
	int dpsec_id;

	u16 major_ver;
	u16 minor_ver;

	struct dpseci_attr dpseci_attr;
	struct dpseci_sec_attr sec_attr;
	struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
	struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
	int num_pairs;

	/* congestion */
	void *cscn_mem;
	void *cscn_mem_aligned;
	dma_addr_t cscn_dma;

	struct device *dev;
	struct fsl_mc_io *mc_io;
	struct iommu_domain *domain;

	struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
};

/**
 * dpaa2_caam_priv_per_cpu - per CPU private data
 * @napi: napi structure
 * @net_dev: netdev used by napi
 * @req_fqid: (virtual) request (Tx / enqueue) FQID
 * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
 * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
 * @nctx: notification context of response FQ
 * @store: where dequeued frames are stored
 * @priv: backpointer to dpaa2_caam_priv
 */
struct dpaa2_caam_priv_per_cpu {
	struct napi_struct napi;
	struct net_device net_dev;
	int req_fqid;
	int rsp_fqid;
	int prio;
	struct dpaa2_io_notification_ctx nctx;
	struct dpaa2_io_store *store;
	struct dpaa2_caam_priv *priv;
};

/*
 * The CAAM QI hardware constructs a job descriptor which points
 * to shared descriptor (as pointed by context_a of FQ to CAAM).
 * When the job descriptor is executed by deco, the whole job
 * descriptor together with shared descriptor gets loaded in
 * deco buffer which is 64 words long (each 32-bit).
 *
 * The job descriptor constructed by QI hardware has layout:
 *
 *	HEADER		(1 word)
 *	Shdesc ptr	(1 or 2 words)
 *	SEQ_OUT_PTR	(1 word)
 *	Out ptr		(1 or 2 words)
 *	Out length	(1 word)
 *	SEQ_IN_PTR	(1 word)
 *	In ptr		(1 or 2 words)
 *	In length	(1 word)
 *
 * The shdesc ptr is used to fetch shared descriptor contents
 * into deco buffer.
 *
 * Apart from shdesc contents, the total number of words that
 * get loaded in deco buffer are '8' or '11'. The remaining words
 * in deco buffer can be used for storing shared descriptor.
 */
#define MAX_SDLEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)

/* Length of a single buffer in the QI driver memory cache */
#define CAAM_QI_MEMCACHE_SIZE	512

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct dpaa2_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped qm_sg space
 * @qm_sg_dma: I/O virtual address of h/w link table
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct dpaa2_sg_entry sgt[0];
};

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: I/O virtual address of req->result
 * @qm_sg_dma: I/O virtual address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @qm_sg_bytes: length of dma mapped qm_sg space
 * @sgt: pointer to h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t qm_sg_dma;
	int src_nents;
	int qm_sg_bytes;
	struct dpaa2_sg_entry sgt[0];
};

/**
 * caam_flc - Flow Context (FLC)
 * @flc: Flow Context options
 * @sh_desc: Shared Descriptor
 */
struct caam_flc {
	u32 flc[16];
	u32 sh_desc[MAX_SDLEN];
} ____cacheline_aligned;

enum optype {
	ENCRYPT = 0,
	DECRYPT,
	NUM_OP
};

/**
 * caam_request - the request structure the driver application should fill while
 *                submitting a job to driver.
 * @fd_flt: Frame list table defining input and output
 *          fd_flt[0] - FLE pointing to output buffer
 *          fd_flt[1] - FLE pointing to input buffer
 * @fd_flt_dma: DMA address for the frame list table
 * @flc: Flow Context
 * @flc_dma: I/O virtual address of Flow Context
 * @cbk: Callback function to invoke when job is completed
 * @ctx: arbitrary context attached to the request by the application
 * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
 */
struct caam_request {
	struct dpaa2_fl_entry fd_flt[2];
	dma_addr_t fd_flt_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	void (*cbk)(void *ctx, u32 err);
	void *ctx;
	void *edesc;
};

/**
 * dpaa2_caam_enqueue() - enqueue a crypto request
 * @dev: device associated with the DPSECI object
 * @req: pointer to caam_request
 */
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);

#endif /* _CAAMALG_QI2_H_ */
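struct caam_request and dpaa2_caam_enqueue() above form the whole submission contract of the qi2 backend. A hedged usage sketch follows; it is not taken from the suppressed caamalg_qi2.c, the demo_* names are invented, and DMA mapping of the buffers and frame list table is assumed already done:

/* Illustrative enqueue path for a single-buffer in/out job. */
#include <soc/fsl/dpaa2-fd.h>
#include "caamalg_qi2.h"

static void demo_done(void *ctx, u32 err)
{
	/* cbk runs from the Rx/dequeue path; err is the SEC job status, 0 on success */
}

static int demo_enqueue(struct device *dev, struct caam_request *req,
			struct caam_flc *flc, dma_addr_t flc_dma,
			dma_addr_t src_dma, dma_addr_t dst_dma, u32 len)
{
	/* fd_flt[0] points to the output buffer, fd_flt[1] to the input */
	dpaa2_fl_set_format(&req->fd_flt[0], dpaa2_fl_single);
	dpaa2_fl_set_addr(&req->fd_flt[0], dst_dma);
	dpaa2_fl_set_len(&req->fd_flt[0], len);

	dpaa2_fl_set_format(&req->fd_flt[1], dpaa2_fl_single);
	dpaa2_fl_set_addr(&req->fd_flt[1], src_dma);
	dpaa2_fl_set_len(&req->fd_flt[1], len);

	req->flc = flc;		/* shared descriptor lives in flc->sh_desc */
	req->flc_dma = flc_dma;
	req->cbk = demo_done;
	req->ctx = NULL;	/* arbitrary caller context passed back to cbk */

	return dpaa2_caam_enqueue(dev, req);
}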
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
@@ -62,6 +63,7 @@
 #include "error.h"
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
+#include "caamhash_desc.h"

 #define CAAM_CRA_PRIORITY		3000

@@ -71,14 +73,6 @@
 #define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

-/* length of descriptors text */
-#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
 #define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
 					 CAAM_MAX_HASH_KEY_SIZE)
 #define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
 	return 0;
 }

-/*
- * For ahash update, final and finup (import_ctx = true)
- *     import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- *     read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-				     struct caam_hash_ctx *ctx, bool import_ctx,
-				     int era)
-{
-	u32 op = ctx->adata.algtype;
-	u32 *skip_key_load;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Append key if it has been set; ahash update excluded */
-	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-		/* Skip key loading if already shared */
-		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-					    JUMP_COND_SHRD);
-
-		if (era < 6)
-			append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-					  ctx->adata.keylen, CLASS_2 |
-					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		else
-			append_proto_dkp(desc, &ctx->adata);
-
-		set_jump_tgt_here(desc, skip_key_load);
-
-		op |= OP_ALG_AAI_HMAC_PRECOMP;
-	}
-
-	/* If needed, import context from software */
-	if (import_ctx)
-		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-				LDST_SRCDST_BYTE_CONTEXT);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 * Calculate remaining bytes to read
-	 */
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	/* Read remaining bytes */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-			     FIFOLD_TYPE_MSG | KEY_VLF);
-	/* Store class2 context bytes */
-	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-}
-
 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_update shared descriptor */
 	desc = ctx->sh_desc_update;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
 				   desc_bytes(desc), ctx->dir);
 #ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
 				   desc_bytes(desc), ctx->dir);
 #ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+			  ctx->ctx_len, true, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
 				   desc_bytes(desc), ctx->dir);
 #ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)

 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
-	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
-			  ctrlpriv->era);
+	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+			  ctx->ctx_len, false, ctrlpriv->era);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
 				   desc_bytes(desc), ctx->dir);
 #ifdef DEBUG
drivers/crypto/caam/caamhash_desc.c (new file, 80 lines)
@@ -0,0 +1,80 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Shared descriptors for ahash algorithms
 *
 * Copyright 2017 NXP
 */

#include "compat.h"
#include "desc_constr.h"
#include "caamhash_desc.h"

/**
 * cnstr_shdsc_ahash - ahash shared descriptor
 * @desc: pointer to buffer used for descriptor construction
 * @adata: pointer to authentication transform definitions.
 *         A split key is required for SEC Era < 6; the size of the split key
 *         is specified in this case.
 *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
 *         SHA256, SHA384, SHA512}.
 * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
 * @digestsize: algorithm's digest size
 * @ctx_len: size of Context Register
 * @import_ctx: true if previous Context Register needs to be restored
 *              must be true for ahash update and final
 *              must be false for ahash first and digest
 * @era: SEC Era
 */
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
		       int digestsize, int ctx_len, bool import_ctx, int era)
{
	u32 op = adata->algtype;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if (state != OP_ALG_AS_UPDATE && adata->keylen) {
		u32 *skip_key_load;

		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		if (era < 6)
			append_key_as_imm(desc, adata->key_virt,
					  adata->keylen_pad,
					  adata->keylen, CLASS_2 |
					  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		else
			append_proto_dkp(desc, adata);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
EXPORT_SYMBOL(cnstr_shdsc_ahash);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
MODULE_AUTHOR("NXP Semiconductors");
drivers/crypto/caam/caamhash_desc.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Shared descriptors for ahash algorithms
 *
 * Copyright 2017 NXP
 */

#ifndef _CAAMHASH_DESC_H_
#define _CAAMHASH_DESC_H_

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
		       int digestsize, int ctx_len, bool import_ctx, int era);

#endif /* _CAAMHASH_DESC_H_ */
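The caamhash.c hunks above replace the driver-private ahash_gen_sh_desc() with four calls to this shared helper. Condensed into one illustrative wrapper (demo_build_ahash_shdescs is not kernel code), the calls differ only in @state, the result size, and whether the running context is imported:

/* Mirrors the four callers shown in the caamhash.c diff above. */
#include "desc_constr.h"
#include "caamhash_desc.h"

static void demo_build_ahash_shdescs(u32 *upd, u32 *first, u32 *fin, u32 *dig,
				     struct alginfo *adata, int ctx_len,
				     int digestsize, int era)
{
	/* update: restore the running context, write it back */
	cnstr_shdsc_ahash(upd, adata, OP_ALG_AS_UPDATE, ctx_len, ctx_len,
			  true, era);
	/* update_first: start from a fresh context */
	cnstr_shdsc_ahash(first, adata, OP_ALG_AS_INIT, ctx_len, ctx_len,
			  false, era);
	/* final: restore the context and emit the digest */
	cnstr_shdsc_ahash(fin, adata, OP_ALG_AS_FINALIZE, digestsize, ctx_len,
			  true, era);
	/* digest: one-shot init+finalize */
	cnstr_shdsc_ahash(dig, adata, OP_ALG_AS_INITFINAL, digestsize, ctx_len,
			  false, era);
}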
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for hw_random
  *
@@ -17,6 +17,7 @@
 #include <linux/of_platform.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <linux/iommu.h>
 #include <linux/spinlock.h>
 #include <linux/rtnetlink.h>
 #include <linux/in.h>
@@ -39,6 +40,7 @@
 #include <crypto/authenc.h>
 #include <crypto/akcipher.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/rsa.h>
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* * CAAM control-plane driver backend
  * Controller-level driver, kernel property detection, initialization
  *
drivers/crypto/caam/dpseci.c (new file, 426 lines)
@@ -0,0 +1,426 @@
|
||||
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
|
||||
/*
|
||||
* Copyright 2013-2016 Freescale Semiconductor Inc.
|
||||
* Copyright 2017-2018 NXP
|
||||
*/
|
||||
|
||||
#include <linux/fsl/mc.h>
|
||||
#include "dpseci.h"
|
||||
#include "dpseci_cmd.h"
|
||||
|
||||
/**
|
||||
* dpseci_open() - Open a control session for the specified object
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @dpseci_id: DPSECI unique ID
|
||||
* @token: Returned token; use in subsequent API calls
|
||||
*
|
||||
* This function can be used to open a control session for an already created
|
||||
* object; an object may have been declared statically in the DPL
|
||||
* or created dynamically.
|
||||
* This function returns a unique authentication token, associated with the
|
||||
* specific object ID and the specific MC portal; this token must be used in all
|
||||
* subsequent commands for this specific object.
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
|
||||
u16 *token)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_cmd_open *cmd_params;
|
||||
int err;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
|
||||
cmd_flags,
|
||||
0);
|
||||
cmd_params = (struct dpseci_cmd_open *)cmd.params;
|
||||
cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
|
||||
err = mc_send_command(mc_io, &cmd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*token = mc_cmd_hdr_read_token(&cmd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_close() - Close the control session of the object
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
*
|
||||
* After this function is called, no further operations are allowed on the
|
||||
* object without opening a new control session.
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
|
||||
cmd_flags,
|
||||
token);
|
||||
return mc_send_command(mc_io, &cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
|
||||
cmd_flags,
|
||||
token);
|
||||
return mc_send_command(mc_io, &cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
|
||||
cmd_flags,
|
||||
token);
|
||||
|
||||
return mc_send_command(mc_io, &cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_is_enabled() - Check if the DPSECI is enabled.
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
* @en: Returns '1' if object is enabled; '0' otherwise
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
|
||||
int *en)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_rsp_is_enabled *rsp_params;
|
||||
int err;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
|
||||
cmd_flags,
|
||||
token);
|
||||
err = mc_send_command(mc_io, &cmd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
|
||||
*en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_get_attributes() - Retrieve DPSECI attributes
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
* @attr: Returned object's attributes
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
|
||||
struct dpseci_attr *attr)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_rsp_get_attributes *rsp_params;
|
||||
int err;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
|
||||
cmd_flags,
|
||||
token);
|
||||
err = mc_send_command(mc_io, &cmd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
|
||||
attr->id = le32_to_cpu(rsp_params->id);
|
||||
attr->num_tx_queues = rsp_params->num_tx_queues;
|
||||
attr->num_rx_queues = rsp_params->num_rx_queues;
|
||||
attr->options = le32_to_cpu(rsp_params->options);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_set_rx_queue() - Set Rx queue configuration
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
* @queue: Select the queue relative to number of priorities configured at
|
||||
* DPSECI creation; use DPSECI_ALL_QUEUES to configure all
|
||||
* Rx queues identically.
|
||||
* @cfg: Rx queue configuration
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
|
||||
u8 queue, const struct dpseci_rx_queue_cfg *cfg)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_cmd_queue *cmd_params;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
|
||||
cmd_flags,
|
||||
token);
|
||||
cmd_params = (struct dpseci_cmd_queue *)cmd.params;
|
||||
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
|
||||
cmd_params->priority = cfg->dest_cfg.priority;
|
||||
cmd_params->queue = queue;
|
||||
dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
|
||||
cfg->dest_cfg.dest_type);
|
||||
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
|
||||
cmd_params->options = cpu_to_le32(cfg->options);
|
||||
dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
|
||||
cfg->order_preservation_en);
|
||||
|
||||
return mc_send_command(mc_io, &cmd);
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_get_rx_queue() - Retrieve Rx queue attributes
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
* @queue: Select the queue relative to number of priorities configured at
|
||||
* DPSECI creation
|
||||
* @attr: Returned Rx queue attributes
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
|
||||
u8 queue, struct dpseci_rx_queue_attr *attr)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_cmd_queue *cmd_params;
|
||||
int err;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
|
||||
cmd_flags,
|
||||
token);
|
||||
cmd_params = (struct dpseci_cmd_queue *)cmd.params;
|
||||
cmd_params->queue = queue;
|
||||
err = mc_send_command(mc_io, &cmd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
|
||||
attr->dest_cfg.priority = cmd_params->priority;
|
||||
attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
|
||||
DEST_TYPE);
|
||||
attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
|
||||
attr->fqid = le32_to_cpu(cmd_params->fqid);
|
||||
attr->order_preservation_en =
|
||||
dpseci_get_field(cmd_params->order_preservation_en,
|
||||
ORDER_PRESERVATION);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dpseci_get_tx_queue() - Retrieve Tx queue attributes
|
||||
* @mc_io: Pointer to MC portal's I/O object
|
||||
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
|
||||
* @token: Token of DPSECI object
|
||||
* @queue: Select the queue relative to number of priorities configured at
|
||||
* DPSECI creation
|
||||
* @attr: Returned Tx queue attributes
|
||||
*
|
||||
* Return: '0' on success, error code otherwise
|
||||
*/
|
||||
int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
|
||||
u8 queue, struct dpseci_tx_queue_attr *attr)
|
||||
{
|
||||
struct fsl_mc_command cmd = { 0 };
|
||||
struct dpseci_cmd_queue *cmd_params;
|
||||
struct dpseci_rsp_get_tx_queue *rsp_params;
|
||||
int err;
|
||||
|
||||
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
|
||||
cmd_flags,
|
||||
token);
|
||||
cmd_params = (struct dpseci_cmd_queue *)cmd.params;
|
||||
cmd_params->queue = queue;
|
||||
err = mc_send_command(mc_io, &cmd);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
|
||||
attr->fqid = le32_to_cpu(rsp_params->fqid);
|
||||
attr->priority = rsp_params->priority;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
 * @mc_io: Pointer to MC portal's I/O object
 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
 * @token: Token of DPSECI object
 * @attr: Returned SEC attributes
 *
 * Return: '0' on success, error code otherwise
 */
int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			struct dpseci_sec_attr *attr)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpseci_rsp_get_sec_attr *rsp_params;
	int err;

	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
					  cmd_flags,
					  token);
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
	attr->ip_id = le16_to_cpu(rsp_params->ip_id);
	attr->major_rev = rsp_params->major_rev;
	attr->minor_rev = rsp_params->minor_rev;
	attr->era = rsp_params->era;
	attr->deco_num = rsp_params->deco_num;
	attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
	attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
	attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
	attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
	attr->crc_acc_num = rsp_params->crc_acc_num;
	attr->pk_acc_num = rsp_params->pk_acc_num;
	attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
	attr->rng_acc_num = rsp_params->rng_acc_num;
	attr->md_acc_num = rsp_params->md_acc_num;
	attr->arc4_acc_num = rsp_params->arc4_acc_num;
	attr->des_acc_num = rsp_params->des_acc_num;
	attr->aes_acc_num = rsp_params->aes_acc_num;
	attr->ccha_acc_num = rsp_params->ccha_acc_num;
	attr->ptha_acc_num = rsp_params->ptha_acc_num;

	return 0;
}

/**
 * dpseci_get_api_version() - Get Data Path SEC Interface API version
 * @mc_io: Pointer to MC portal's I/O object
 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
 * @major_ver: Major version of data path sec API
 * @minor_ver: Minor version of data path sec API
 *
 * Return: '0' on success, error code otherwise
 */
int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
			   u16 *major_ver, u16 *minor_ver)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpseci_rsp_get_api_version *rsp_params;
	int err;

	cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
					  cmd_flags, 0);
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
	*major_ver = le16_to_cpu(rsp_params->major);
	*minor_ver = le16_to_cpu(rsp_params->minor);

	return 0;
}

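For orientation, a hedged sketch of a caller-side version gate built on dpseci_get_api_version(); the helper name and error policy are illustrative, not part of this patch:

/* Refuse to drive firmware older than the DPSECI_VERSION the driver was
 * built against (DPSECI_VER()/DPSECI_VERSION come from dpseci_cmd.h below).
 */
static int check_dpseci_version(struct fsl_mc_io *mc_io, u32 cmd_flags)
{
	u16 major, minor;
	int err;

	err = dpseci_get_api_version(mc_io, cmd_flags, &major, &minor);
	if (err)
		return err;

	if (DPSECI_VER(major, minor) < DPSECI_VERSION)
		return -EOPNOTSUPP;	/* firmware too old for this driver */

	return 0;
}
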
/**
 * dpseci_set_congestion_notification() - Set congestion group
 *	notification configuration
 * @mc_io: Pointer to MC portal's I/O object
 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
 * @token: Token of DPSECI object
 * @cfg: congestion notification configuration
 *
 * Return: '0' on success, error code otherwise
 */
int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
	u16 token, const struct dpseci_congestion_notification_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpseci_cmd_congestion_notification *cmd_params;

	cmd.header = mc_encode_cmd_header(
			DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
			cmd_flags,
			token);
	cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
	cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
	cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
	cmd_params->priority = cfg->dest_cfg.priority;
	dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
			 cfg->dest_cfg.dest_type);
	dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
	cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
	cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
	cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
	cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);

	return mc_send_command(mc_io, &cmd);
}

/**
 * dpseci_get_congestion_notification() - Get congestion group notification
 *	configuration
 * @mc_io: Pointer to MC portal's I/O object
 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
 * @token: Token of DPSECI object
 * @cfg: congestion notification configuration
 *
 * Return: '0' on success, error code otherwise
 */
int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
	u16 token, struct dpseci_congestion_notification_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpseci_cmd_congestion_notification *rsp_params;
	int err;

	cmd.header = mc_encode_cmd_header(
			DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
			cmd_flags,
			token);
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
	cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
	cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
	cfg->dest_cfg.priority = rsp_params->priority;
	cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
						   CGN_DEST_TYPE);
	cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
	cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
	cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
	cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
	cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);

	return 0;
}
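
To make the set/get pair above concrete, a hedged example of arming the congestion group; the thresholds and mode bits are illustrative values, using the flag and field names declared in dpseci.h below:

/* Notify in frame units; write a CSCN record to pre-mapped, 16B-aligned
 * DMA memory on both entry to and exit from the congested state.
 */
static int arm_congestion_group(struct fsl_mc_io *mc_io, u16 token,
				dma_addr_t cscn_iova)
{
	struct dpseci_congestion_notification_cfg cfg = {
		.units = DPSECI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 1024,
		.threshold_exit = 512,
		.message_iova = cscn_iova,
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
		.dest_cfg = { .dest_type = DPSECI_DEST_NONE },
	};

	return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
}
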
drivers/crypto/caam/dpseci.h (new file, 333 lines)
@@ -0,0 +1,333 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */
#ifndef _DPSECI_H_
#define _DPSECI_H_

/*
 * Data Path SEC Interface API
 * Contains initialization APIs and runtime control APIs for DPSECI
 */

struct fsl_mc_io;

/**
 * General DPSECI macros
 */

/**
 * Maximum number of Tx/Rx queues per DPSECI object
 */
#define DPSECI_MAX_QUEUE_NUM	16

/**
 * All queues considered; see dpseci_set_rx_queue()
 */
#define DPSECI_ALL_QUEUES	(u8)(-1)

int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
		u16 *token);

int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);

/**
 * Enable the Congestion Group support
 */
#define DPSECI_OPT_HAS_CG	0x000020

/**
 * struct dpseci_cfg - Structure representing DPSECI configuration
 * @options: Any combination of the following flags:
 *	DPSECI_OPT_HAS_CG
 * @num_tx_queues: num of queues towards the SEC
 * @num_rx_queues: num of queues back from the SEC
 * @priorities: Priorities for the SEC hardware processing;
 *	each place in the array is the priority of the tx queue
 *	towards the SEC;
 *	valid priorities are configured with values 1-8;
 */
struct dpseci_cfg {
	u32 options;
	u8 num_tx_queues;
	u8 num_rx_queues;
	u8 priorities[DPSECI_MAX_QUEUE_NUM];
};

int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);

int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);

int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
		      int *en);

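Taken together, the declarations above imply a simple open/enable lifecycle; a hedged sketch (the helper name and the cmd_flags value of 0 are illustrative, not part of this patch):

static int dpseci_bring_up(struct fsl_mc_io *mc_io, int dpseci_id, u16 *token)
{
	int enabled = 0;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, token);
	if (err)
		return err;

	err = dpseci_enable(mc_io, 0, *token);
	if (!err)
		err = dpseci_is_enabled(mc_io, 0, *token, &enabled);
	if (!err && !enabled)
		err = -ENODEV;
	if (err)
		dpseci_close(mc_io, 0, *token);

	return err;
}
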
/**
 * struct dpseci_attr - Structure representing DPSECI attributes
 * @id: DPSECI object ID
 * @num_tx_queues: number of queues towards the SEC
 * @num_rx_queues: number of queues back from the SEC
 * @options: any combination of the following flags:
 *	DPSECI_OPT_HAS_CG
 */
struct dpseci_attr {
	int id;
	u8 num_tx_queues;
	u8 num_rx_queues;
	u32 options;
};

int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			  struct dpseci_attr *attr);

/**
 * enum dpseci_dest - DPSECI destination types
 * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
 *	and does not generate FQDAN notifications; user is expected to dequeue
 *	from the queue based on polling or other user-defined method
 * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
 *	notifications to the specified DPIO; user is expected to dequeue from
 *	the queue only after notification is received
 * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
 *	FQDAN notifications, but is connected to the specified DPCON object;
 *	user is expected to dequeue from the DPCON channel
 */
enum dpseci_dest {
	DPSECI_DEST_NONE = 0,
	DPSECI_DEST_DPIO,
	DPSECI_DEST_DPCON
};

/**
 * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
 * @dest_type: Destination type
 * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
 * @priority: Priority selection within the DPIO or DPCON channel; valid values
 *	are 0-1 or 0-7, depending on the number of priorities in that channel;
 *	not relevant for 'DPSECI_DEST_NONE' option
 */
struct dpseci_dest_cfg {
	enum dpseci_dest dest_type;
	int dest_id;
	u8 priority;
};

/**
 * DPSECI queue modification options
 */

/**
 * Select to modify the user's context associated with the queue
 */
#define DPSECI_QUEUE_OPT_USER_CTX		0x00000001

/**
 * Select to modify the queue's destination
 */
#define DPSECI_QUEUE_OPT_DEST			0x00000002

/**
 * Select to modify the queue's order preservation
 */
#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION	0x00000004

/**
 * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
 * @options: Flags representing the suggested modifications to the queue;
 *	Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
 * @order_preservation_en: order preservation configuration for the rx queue
 *	valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
 * @user_ctx: User context value provided in the frame descriptor of each
 *	dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
 *	in 'options'
 * @dest_cfg: Queue destination parameters; valid only if
 *	'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
 */
struct dpseci_rx_queue_cfg {
	u32 options;
	int order_preservation_en;
	u64 user_ctx;
	struct dpseci_dest_cfg dest_cfg;
};

int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			u8 queue, const struct dpseci_rx_queue_cfg *cfg);

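A hedged example of the modify-mask pattern the kernel-doc above describes: only the fields whose DPSECI_QUEUE_OPT_<X> bit is set in @options are applied. dpio_id and ctx are placeholders:

/* Route every Rx queue to a DPIO and tag dequeued frames with a caller
 * cookie; order preservation stays untouched since its OPT bit is clear.
 */
static int route_rx_to_dpio(struct fsl_mc_io *mc_io, u16 token,
			    int dpio_id, u64 ctx)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = ctx,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 1,
		},
	};

	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}
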
/**
 * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
 * @user_ctx: User context value provided in the frame descriptor of each
 *	dequeued frame
 * @order_preservation_en: Status of the order preservation configuration on the
 *	queue
 * @dest_cfg: Queue destination configuration
 * @fqid: Virtual FQID value to be used for dequeue operations
 */
struct dpseci_rx_queue_attr {
	u64 user_ctx;
	int order_preservation_en;
	struct dpseci_dest_cfg dest_cfg;
	u32 fqid;
};

int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			u8 queue, struct dpseci_rx_queue_attr *attr);

/**
 * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
 * @fqid: Virtual FQID to be used for sending frames to SEC hardware
 * @priority: SEC hardware processing priority for the queue
 */
struct dpseci_tx_queue_attr {
	u32 fqid;
	u8 priority;
};

int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			u8 queue, struct dpseci_tx_queue_attr *attr);

/**
 * struct dpseci_sec_attr - Structure representing attributes of the SEC
 *	hardware accelerator
 * @ip_id: ID for SEC
 * @major_rev: Major revision number for SEC
 * @minor_rev: Minor revision number for SEC
 * @era: SEC Era
 * @deco_num: The number of copies of the DECO that are implemented in this
 *	version of SEC
 * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
 *	version of SEC
 * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
 *	version of SEC
 * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
 *	implemented in this version of SEC
 * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
 *	implemented in this version of SEC
 * @crc_acc_num: The number of copies of the CRC module that are implemented in
 *	this version of SEC
 * @pk_acc_num: The number of copies of the Public Key module that are
 *	implemented in this version of SEC
 * @kasumi_acc_num: The number of copies of the Kasumi module that are
 *	implemented in this version of SEC
 * @rng_acc_num: The number of copies of the Random Number Generator that are
 *	implemented in this version of SEC
 * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
 *	implemented in this version of SEC
 * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
 *	in this version of SEC
 * @des_acc_num: The number of copies of the DES module that are implemented in
 *	this version of SEC
 * @aes_acc_num: The number of copies of the AES module that are implemented in
 *	this version of SEC
 * @ccha_acc_num: The number of copies of the ChaCha20 module that are
 *	implemented in this version of SEC.
 * @ptha_acc_num: The number of copies of the Poly1305 module that are
 *	implemented in this version of SEC.
 */
struct dpseci_sec_attr {
	u16 ip_id;
	u8 major_rev;
	u8 minor_rev;
	u8 era;
	u8 deco_num;
	u8 zuc_auth_acc_num;
	u8 zuc_enc_acc_num;
	u8 snow_f8_acc_num;
	u8 snow_f9_acc_num;
	u8 crc_acc_num;
	u8 pk_acc_num;
	u8 kasumi_acc_num;
	u8 rng_acc_num;
	u8 md_acc_num;
	u8 arc4_acc_num;
	u8 des_acc_num;
	u8 aes_acc_num;
	u8 ccha_acc_num;
	u8 ptha_acc_num;
};

int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
			struct dpseci_sec_attr *attr);

int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
			   u16 *major_ver, u16 *minor_ver);

/**
 * enum dpseci_congestion_unit - DPSECI congestion units
 * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
 * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
 */
enum dpseci_congestion_unit {
	DPSECI_CONGESTION_UNIT_BYTES = 0,
	DPSECI_CONGESTION_UNIT_FRAMES
};

/**
 * CSCN message is written to message_iova once entering a
 * congestion state (see 'threshold_entry')
 */
#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER		0x00000001

/**
 * CSCN message is written to message_iova once exiting a
 * congestion state (see 'threshold_exit')
 */
#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT		0x00000002

/**
 * CSCN write will attempt to allocate into a cache (coherent write);
 * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
 */
#define DPSECI_CGN_MODE_COHERENT_WRITE			0x00000004

/**
 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
 * DPIO/DPCON's WQ channel once entering a congestion state
 * (see 'threshold_entry')
 */
#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER		0x00000008

/**
 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
 * DPIO/DPCON's WQ channel once exiting a congestion state
 * (see 'threshold_exit')
 */
#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT		0x00000010

/**
 * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
 * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
 * (if enabled)
 */
#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED	0x00000020

/**
 * struct dpseci_congestion_notification_cfg - congestion notification
 *	configuration
 * @units: units type
 * @threshold_entry: above this threshold we enter a congestion state.
 *	set it to '0' to disable it
 * @threshold_exit: below this threshold we exit the congestion state.
 * @message_ctx: The context that will be part of the CSCN message
 * @message_iova: I/O virtual address (must be in DMA-able memory),
 *	must be 16B aligned;
 * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
 * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
 *	values
 */
struct dpseci_congestion_notification_cfg {
	enum dpseci_congestion_unit units;
	u32 threshold_entry;
	u32 threshold_exit;
	u64 message_ctx;
	u64 message_iova;
	struct dpseci_dest_cfg dest_cfg;
	u16 notification_mode;
};

int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
	u16 token, const struct dpseci_congestion_notification_cfg *cfg);

int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
	u16 token, struct dpseci_congestion_notification_cfg *cfg);

#endif /* _DPSECI_H_ */
drivers/crypto/caam/dpseci_cmd.h (new file, 149 lines)
@@ -0,0 +1,149 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2018 NXP
 */

#ifndef _DPSECI_CMD_H_
#define _DPSECI_CMD_H_

/* DPSECI Version */
#define DPSECI_VER_MAJOR	5
#define DPSECI_VER_MINOR	3

#define DPSECI_VER(maj, min)	(((maj) << 16) | (min))
#define DPSECI_VERSION		DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)

/* Command versioning */
#define DPSECI_CMD_BASE_VERSION		1
#define DPSECI_CMD_BASE_VERSION_V2	2
#define DPSECI_CMD_ID_OFFSET		4

#define DPSECI_CMD_V1(id)	(((id) << DPSECI_CMD_ID_OFFSET) | \
				 DPSECI_CMD_BASE_VERSION)

#define DPSECI_CMD_V2(id)	(((id) << DPSECI_CMD_ID_OFFSET) | \
				 DPSECI_CMD_BASE_VERSION_V2)

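For orientation, the encoding these macros produce puts the command id in the upper bits and the command (not API) version in the low nibble. A worked instance, using values defined just below:

/* DPSECI_CMDID_GET_SEC_ATTR = DPSECI_CMD_V2(0x198)
 *			     = (0x198 << 4) | 2
 *			     =  0x1980      | 2
 *			     =  0x1982
 * so moving a command from V1 to V2 changes only the low version nibble.
 */
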
/* Command IDs */
#define DPSECI_CMDID_CLOSE		DPSECI_CMD_V1(0x800)
#define DPSECI_CMDID_OPEN		DPSECI_CMD_V1(0x809)
#define DPSECI_CMDID_GET_API_VERSION	DPSECI_CMD_V1(0xa09)

#define DPSECI_CMDID_ENABLE		DPSECI_CMD_V1(0x002)
#define DPSECI_CMDID_DISABLE		DPSECI_CMD_V1(0x003)
#define DPSECI_CMDID_GET_ATTR		DPSECI_CMD_V1(0x004)
#define DPSECI_CMDID_IS_ENABLED		DPSECI_CMD_V1(0x006)

#define DPSECI_CMDID_SET_RX_QUEUE	DPSECI_CMD_V1(0x194)
#define DPSECI_CMDID_GET_RX_QUEUE	DPSECI_CMD_V1(0x196)
#define DPSECI_CMDID_GET_TX_QUEUE	DPSECI_CMD_V1(0x197)
#define DPSECI_CMDID_GET_SEC_ATTR	DPSECI_CMD_V2(0x198)
#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION	DPSECI_CMD_V1(0x170)
#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION	DPSECI_CMD_V1(0x171)

/* Macros for accessing command fields smaller than 1 byte */
#define DPSECI_MASK(field)	\
	GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1,	\
		DPSECI_##field##_SHIFT)

#define dpseci_set_field(var, field, val)	\
	((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))

#define dpseci_get_field(var, field)	\
	(((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)

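A worked expansion may help: with the DPSECI_CGN_UNITS definitions further down (shift 4, size 2), the accessors reduce to a plain mask-and-shift:

/* dpseci_set_field(opts, CGN_UNITS, 2) expands, via token pasting, to
 *
 *	opts |= (2 << 4) & GENMASK(5, 4);	// set bits 5:4 to 0b10
 *
 * and dpseci_get_field(opts, CGN_UNITS) to
 *
 *	(opts & GENMASK(5, 4)) >> 4;		// read them back
 */
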
struct dpseci_cmd_open {
	__le32 dpseci_id;
};

#define DPSECI_ENABLE_SHIFT	0
#define DPSECI_ENABLE_SIZE	1

struct dpseci_rsp_is_enabled {
	u8 is_enabled;
};

struct dpseci_rsp_get_attributes {
	__le32 id;
	__le32 pad0;
	u8 num_tx_queues;
	u8 num_rx_queues;
	u8 pad1[6];
	__le32 options;
};

#define DPSECI_DEST_TYPE_SHIFT	0
#define DPSECI_DEST_TYPE_SIZE	4

#define DPSECI_ORDER_PRESERVATION_SHIFT	0
#define DPSECI_ORDER_PRESERVATION_SIZE	1

struct dpseci_cmd_queue {
	__le32 dest_id;
	u8 priority;
	u8 queue;
	u8 dest_type;
	u8 pad;
	__le64 user_ctx;
	union {
		__le32 options;
		__le32 fqid;
	};
	u8 order_preservation_en;
};

struct dpseci_rsp_get_tx_queue {
	__le32 pad;
	__le32 fqid;
	u8 priority;
};

struct dpseci_rsp_get_sec_attr {
	__le16 ip_id;
	u8 major_rev;
	u8 minor_rev;
	u8 era;
	u8 pad0[3];
	u8 deco_num;
	u8 zuc_auth_acc_num;
	u8 zuc_enc_acc_num;
	u8 pad1;
	u8 snow_f8_acc_num;
	u8 snow_f9_acc_num;
	u8 crc_acc_num;
	u8 pad2;
	u8 pk_acc_num;
	u8 kasumi_acc_num;
	u8 rng_acc_num;
	u8 pad3;
	u8 md_acc_num;
	u8 arc4_acc_num;
	u8 des_acc_num;
	u8 aes_acc_num;
	u8 ccha_acc_num;
	u8 ptha_acc_num;
};

struct dpseci_rsp_get_api_version {
	__le16 major;
	__le16 minor;
};

#define DPSECI_CGN_DEST_TYPE_SHIFT	0
#define DPSECI_CGN_DEST_TYPE_SIZE	4
#define DPSECI_CGN_UNITS_SHIFT		4
#define DPSECI_CGN_UNITS_SIZE		2

struct dpseci_cmd_congestion_notification {
	__le32 dest_id;
	__le16 notification_mode;
	u8 priority;
	u8 options;
	__le64 message_iova;
	__le64 message_ctx;
	__le32 threshold_entry;
	__le32 threshold_exit;
};

#endif /* _DPSECI_CMD_H_ */
@@ -108,6 +108,54 @@ static const struct {
	{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};

static const struct {
	u8 value;
	const char *error_text;
} qi_error_list[] = {
	{ 0x1F, "Job terminated by FQ or ICID flush" },
	{ 0x20, "FD format error"},
	{ 0x21, "FD command format error"},
	{ 0x23, "FL format error"},
	{ 0x25, "CRJD specified in FD, but not enabled in FLC"},
	{ 0x30, "Max. buffer size too small"},
	{ 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
	{ 0x33, "Size over/underflow (allocate mode)"},
	{ 0x34, "Size over/underflow (reuse mode)"},
	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
	{ 0x41, "SBC frame format not supported (allocate mode)"},
	{ 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
	{ 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
	{ 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
	{ 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
	{ 0x46, "Annotation length exceeds offset (reuse mode)"},
	{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
	{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
	{ 0x51, "Unsupported IF reuse mode"},
	{ 0x52, "Unsupported FL use mode"},
	{ 0x53, "Unsupported RJD use mode"},
	{ 0x54, "Unsupported inline descriptor use mode"},
	{ 0xC0, "Table buffer pool 0 depletion"},
	{ 0xC1, "Table buffer pool 1 depletion"},
	{ 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
	{ 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
	{ 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
	{ 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
	{ 0xD0, "FLC read error"},
	{ 0xD1, "FL read error"},
	{ 0xD2, "FL write error"},
	{ 0xD3, "OF SGT write error"},
	{ 0xD4, "PTA read error"},
	{ 0xD5, "PTA write error"},
	{ 0xD6, "OF SGT F-bit write error"},
	{ 0xD7, "ASA write error"},
	{ 0xE1, "FLC[ICR]=0 ICID error"},
	{ 0xE2, "FLC[ICR]=1 ICID error"},
	{ 0xE4, "source of ICID flush not trusted (BDI = 0)"},
};

static const char * const cha_id_list[] = {
	"",
	"AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
		status, error, idx_str, idx, err_str, err_err_code);
}

static void report_qi_status(struct device *qidev, const u32 status,
			     const char *error)
{
	u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
	const char *err_str = "unidentified error value 0x";
	char err_err_code[3] = { 0 };
	int i;

	for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
		if (qi_error_list[i].value == err_id)
			break;

	if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
		err_str = qi_error_list[i].error_text;
	else
		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);

	dev_err(qidev, "%08x: %s: %s%s\n",
		status, error, err_str, err_err_code);
}

static void report_jr_status(struct device *jrdev, const u32 status,
			     const char *error)
{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
		status, error, __func__);
}

void caam_jr_strstatus(struct device *jrdev, u32 status)
void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
	static const struct stat_src {
		void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
		{ report_ccb_status, "CCB" },
		{ report_jump_status, "Jump" },
		{ report_deco_status, "DECO" },
		{ NULL, "Queue Manager Interface" },
		{ report_qi_status, "Queue Manager Interface" },
		{ report_jr_status, "Job Ring" },
		{ report_cond_code_status, "Condition Code" },
		{ NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
	else
		dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
EXPORT_SYMBOL(caam_jr_strstatus);
EXPORT_SYMBOL(caam_strstatus);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM error reporting");
MODULE_AUTHOR("Freescale Semiconductor");
@@ -8,7 +8,11 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
void caam_jr_strstatus(struct device *jrdev, u32 status);

void caam_strstatus(struct device *dev, u32 status, bool qi_v2);

#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)

void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
		  int rowsize, int groupsize, struct scatterlist *sg,
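
The two wrapper macros keep every existing job-ring call site source-compatible; a hedged illustration of what the preprocessor does with them (the exact effect of qi_v2 on decoding lives in error.c above):

/* An existing job-ring call site
 *
 *	caam_jr_strstatus(jrdev, status);
 *
 * now compiles to
 *
 *	caam_strstatus(jrdev, status, false);
 *
 * while the new DPAA2/QI backend calls caam_qi2_strstatus(), i.e. the
 * same function with qi_v2 = true.
 */
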
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
@@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested);
static u64 times_congested;
#endif

/*
 * CPU from where the module initialised. This is required because QMan driver
 * requires CGRs to be removed from same CPU from where they were originally
 * allocated.
 */
static int mod_init_cpu;

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);

int caam_qi_shutdown(struct device *qidev)
void caam_qi_shutdown(struct device *qidev)
{
	int i, ret;
	int i;
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	/*
	 * QMan driver requires CGRs to be deleted from same CPU from where they
	 * were instantiated. Hence we get the module removal executed from the
	 * same CPU from where it was originally inserted.
	 */
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	ret = qman_delete_cgr(&priv->cgr);
	if (ret)
		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
	else
		qman_release_cgrid(priv->cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);

	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);

	platform_device_unregister(priv->qi_pdev);
	return ret;
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;
	static struct platform_device_info qi_pdev_info = {
		.name = "caam_qi",
		.id = PLATFORM_DEVID_NONE
	};

	/*
	 * QMAN requires CGRs to be removed from same CPU+portal from where it
	 * was originally allocated. Hence we need to note down the
	 * initialisation CPU and use the same CPU for module exit.
	 * We select the first CPU from the list of portal owning CPUs.
	 * Then we pin module init to this CPU.
	 */
	mod_init_cpu = cpumask_first(cpus);
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	qi_pdev_info.parent = ctrldev;
	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
	qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
		return -ENOMEM;
	}

	/* Done with the CGRs; restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
			    &times_congested, &caam_fops_u64_ro);
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
	ENCRYPT,
	DECRYPT,
	GIVENCRYPT,
	NUM_OP
};

@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);

int caam_qi_init(struct platform_device *pdev);
int caam_qi_shutdown(struct device *dev);
void caam_qi_shutdown(struct device *dev);

/**
 * qi_cache_alloc - Allocate buffers from CAAM-QI cache
@@ -70,22 +70,22 @@
extern bool caam_little_end;
extern bool caam_imx;

#define caam_to_cpu(len)					\
static inline u##len caam##len ## _to_cpu(u##len val)		\
{								\
	if (caam_little_end)					\
		return le##len ## _to_cpu(val);			\
	else							\
		return be##len ## _to_cpu(val);			\
#define caam_to_cpu(len)						\
static inline u##len caam##len ## _to_cpu(u##len val)			\
{									\
	if (caam_little_end)						\
		return le##len ## _to_cpu((__force __le##len)val);	\
	else								\
		return be##len ## _to_cpu((__force __be##len)val);	\
}

#define cpu_to_caam(len)				\
static inline u##len cpu_to_caam##len(u##len val)	\
{							\
	if (caam_little_end)				\
		return cpu_to_le##len(val);		\
	else						\
		return cpu_to_be##len(val);		\
#define cpu_to_caam(len)					\
static inline u##len cpu_to_caam##len(u##len val)		\
{								\
	if (caam_little_end)					\
		return (__force u##len)cpu_to_le##len(val);	\
	else							\
		return (__force u##len)cpu_to_be##len(val);	\
}

caam_to_cpu(16)
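
A hedged note on what the __force casts buy: they only appease sparse's __le/__be endian-type checking, the generated machine code is unchanged. Approximately, caam_to_cpu(16) now expands to:

static inline u16 caam16_to_cpu(u16 val)
{
	if (caam_little_end)
		return le16_to_cpu((__force __le16)val);
	else
		return be16_to_cpu((__force __be16)val);
}
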
@@ -633,6 +633,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN	0x86
#define JRSTA_DECOERR_DSASIGN	0x87

#define JRSTA_QIERR_ERROR_MASK	0x00ff

#define JRSTA_CCBERR_JUMP	0x08000000
#define JRSTA_CCBERR_INDEX_MASK	0xff00
#define JRSTA_CCBERR_INDEX_SHIFT	8
@@ -1,34 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __SG_SW_QM_H
@@ -1,35 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the names of the above-listed copyright holders nor the
 *	 names of any contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SG_SW_QM2_H_
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
		}
	}

	if (info->scatter_components)
		kzfree(info->scatter_components);

	if (info->gather_components)
		kzfree(info->gather_components);

	if (info->out_buffer)
		kzfree(info->out_buffer);

	if (info->in_buffer)
		kzfree(info->in_buffer);

	if (info->completion_addr)
		kzfree((void *)info->completion_addr);

	kzfree(info->scatter_components);
	kzfree(info->gather_components);
	kzfree(info->out_buffer);
	kzfree(info->in_buffer);
	kzfree((void *)info->completion_addr);
	kzfree(info);
}
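
The simplification relies on kzfree() — like kfree() — being a no-op when passed NULL, which makes the per-buffer NULL checks redundant:

/* Because kzfree(NULL) does nothing,
 *
 *	if (ptr)
 *		kzfree(ptr);
 *
 * can always be collapsed to plain kzfree(ptr), which is all this hunk
 * does for each buffer hanging off the request info.
 */
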
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
	nitrox_hal.o \
	nitrox_reqmgr.o \
	nitrox_algs.o

n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);

void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
int nitrox_pf_init_isr(struct nitrox_device *ndev);

int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);

void pkt_slc_resp_handler(unsigned long data);
void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t cb,
			      struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);

void nitrox_config_emu_unit(struct nitrox_device *ndev);
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
void nitrox_config_nps_unit(struct nitrox_device *ndev);
void nitrox_config_pom_unit(struct nitrox_device *ndev);
void nitrox_config_rand_unit(struct nitrox_device *ndev);
void nitrox_config_efl_unit(struct nitrox_device *ndev);
void nitrox_config_bmi_unit(struct nitrox_device *ndev);
void nitrox_config_bmo_unit(struct nitrox_device *ndev);
void nitrox_config_lbc_unit(struct nitrox_device *ndev);
void invalidate_lbc(struct nitrox_device *ndev);
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);

#endif /* __NITROX_COMMON_H */
@@ -7,9 +7,16 @@

/* EMU clusters */
#define NR_CLUSTERS		4
/*
 * Maximum cores per cluster,
 * varies based on partname
 */
#define AE_CORES_PER_CLUSTER	20
#define SE_CORES_PER_CLUSTER	16

#define AE_MAX_CORES	(AE_CORES_PER_CLUSTER * NR_CLUSTERS)
#define SE_MAX_CORES	(SE_CORES_PER_CLUSTER * NR_CLUSTERS)
#define ZIP_MAX_CORES	5

/* BIST registers */
#define EMU_BIST_STATUSX(_i)	(0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS		0x12C0070
@@ -111,6 +118,9 @@
#define LBC_ELM_VF65_128_INT		0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S	0x120F000

#define RST_BOOT	0x10C1600
#define FUS_DAT1	0x10C1408

/* PEM registers */
#define PEM0_INT	0x1080428
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
	} s;
};

/**
 * struct rst_boot: RST Boot Register
 * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
 *	is disabled
 * @jt_tst_mode: JTAG test mode
 * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
 *	0x1 = 1.8V
 *	0x2 = 2.5V
 *	0x4 = 3.3V
 *	All other values are reserved
 * @pnr_mul: clock multiplier
 * @lboot: last boot cause mask, resets only with PLL_DC_OK
 * @rboot: determines whether core 0 remains in reset after
 *	chip cold or warm or soft reset
 * @rboot_pin: read only access to REMOTE_BOOT pin
 */
union rst_boot {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_63 : 1;
		u64 jtcsrdis : 1;
		u64 raz_59_61 : 3;
		u64 jt_tst_mode : 1;
		u64 raz_40_57 : 18;
		u64 io_supply : 3;
		u64 raz_30_36 : 7;
		u64 pnr_mul : 6;
		u64 raz_12_23 : 12;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
#else
		u64 rboot_pin : 1;
		u64 rboot : 1;
		u64 lboot : 10;
		u64 raz_12_23 : 12;
		u64 pnr_mul : 6;
		u64 raz_30_36 : 7;
		u64 io_supply : 3;
		u64 raz_40_57 : 18;
		u64 jt_tst_mode : 1;
		u64 raz_59_61 : 3;
		u64 jtcsrdis : 1;
		u64 raz_63 : 1;
#endif
	};
};

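The pnr_mul field above is what nitrox_get_hwinfo() (last hunk of nitrox_hal.c below) turns into a core frequency; a worked instance of that arithmetic, assuming the driver's 50 MHz PLL reference:

/* With PLL_REF_CLK = 50 (MHz):
 *
 *	freq = (pnr_mul + 3) * PLL_REF_CLK
 *
 * e.g. an example part reporting pnr_mul = 13 runs at (13 + 3) * 50 = 800 MHz.
 */
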
/**
 * struct fus_dat1: Fuse Data 1 Register
 * @pll_mul: main clock PLL multiplier hardware limit
 * @pll_half_dis: main clock PLL control
 * @efus_lck: efuse lockdown
 * @zip_info: ZIP information
 * @bar2_sz_conf: when zero, BAR2 size conforms to
 *	PCIe specification
 * @efus_ign: efuse ignore
 * @nozip: ZIP disable
 * @pll_alt_matrix: select alternate PLL matrix
 * @pll_bwadj_denom: select CLKF denominator for
 *	BWADJ value
 * @chip_id: chip ID
 */
union fus_dat1 {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_57_63 : 7;
		u64 pll_mul : 3;
		u64 pll_half_dis : 1;
		u64 raz_43_52 : 10;
		u64 efus_lck : 3;
		u64 raz_26_39 : 14;
		u64 zip_info : 5;
		u64 bar2_sz_conf : 1;
		u64 efus_ign : 1;
		u64 nozip : 1;
		u64 raz_11_17 : 7;
		u64 pll_alt_matrix : 1;
		u64 pll_bwadj_denom : 2;
		u64 chip_id : 8;
#else
		u64 chip_id : 8;
		u64 pll_bwadj_denom : 2;
		u64 pll_alt_matrix : 1;
		u64 raz_11_17 : 7;
		u64 nozip : 1;
		u64 efus_ign : 1;
		u64 bar2_sz_conf : 1;
		u64 zip_info : 5;
		u64 raz_26_39 : 14;
		u64 efus_lck : 3;
		u64 raz_43_52 : 10;
		u64 pll_half_dis : 1;
		u64 pll_mul : 3;
		u64 raz_57_63 : 7;
#endif
	};
};

#endif /* __NITROX_CSR_H */
drivers/crypto/cavium/nitrox/nitrox_debugfs.c (new file, 115 lines)
@@ -0,0 +1,115 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "nitrox_csr.h"
#include "nitrox_dev.h"

static int firmware_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
	return 0;
}

static int firmware_open(struct inode *inode, struct file *file)
{
	return single_open(file, firmware_show, inode->i_private);
}

static const struct file_operations firmware_fops = {
	.owner = THIS_MODULE,
	.open = firmware_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int device_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "NITROX [%d]\n", ndev->idx);
	seq_printf(s, "  Part Name: %s\n", ndev->hw.partname);
	seq_printf(s, "  Frequency: %d MHz\n", ndev->hw.freq);
	seq_printf(s, "  Device ID: 0x%0x\n", ndev->hw.device_id);
	seq_printf(s, "  Revision ID: 0x%0x\n", ndev->hw.revision_id);
	seq_printf(s, "  Cores: [AE=%u SE=%u ZIP=%u]\n",
		   ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);

	return 0;
}

static int nitrox_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_show, inode->i_private);
}

static const struct file_operations nitrox_fops = {
	.owner = THIS_MODULE,
	.open = nitrox_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stats_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
	seq_printf(s, "  Posted: %llu\n",
		   (u64)atomic64_read(&ndev->stats.posted));
	seq_printf(s, "  Completed: %llu\n",
		   (u64)atomic64_read(&ndev->stats.completed));
	seq_printf(s, "  Dropped: %llu\n",
		   (u64)atomic64_read(&ndev->stats.dropped));

	return 0;
}

static int nitrox_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static const struct file_operations nitrox_stats_fops = {
	.owner = THIS_MODULE,
	.open = nitrox_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
	ndev->debugfs_dir = NULL;
}

int nitrox_debugfs_init(struct nitrox_device *ndev)
{
	struct dentry *dir, *f;

	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!dir)
		return -ENOMEM;

	ndev->debugfs_dir = dir;
	f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
	if (!f)
		goto err;
	f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
	if (!f)
		goto err;
	f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
	if (!f)
		goto err;

	return 0;

err:
	nitrox_debugfs_exit(ndev);
	return -ENODEV;
}
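
Because nitrox_dev.h (further below) provides no-op stubs when CONFIG_DEBUG_FS is off, callers need no #ifdef; a hedged sketch of probe-side wiring — the function name and error policy are illustrative, not part of this patch:

static int nitrox_setup_debugfs(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_debugfs_init(ndev);
	if (err)
		dev_warn(DEV(ndev), "debugfs setup failed: %d\n", err);

	/* the matching teardown path calls nitrox_debugfs_exit(ndev) */
	return err;
}
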
@@ -5,92 +5,123 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/if.h>

#define VERSION_LEN 32

/**
 * struct nitrox_cmdq - NITROX command queue
 * @cmd_qlock: command queue lock
 * @resp_qlock: response queue lock
 * @backlog_qlock: backlog queue lock
 * @ndev: NITROX device
 * @response_head: submitted request list
 * @backlog_head: backlog queue
 * @dbell_csr_addr: doorbell register address for this queue
 * @compl_cnt_csr_addr: completion count register address of the slc port
 * @base: command queue base address
 * @dma: dma address of the base
 * @pending_count: request pending at device
 * @backlog_count: backlog request count
 * @write_idx: next write index for the command
 * @instr_size: command size
 * @qno: command queue number
 * @qsize: command queue size
 * @unalign_base: unaligned base address
 * @unalign_dma: unaligned dma address
 */
struct nitrox_cmdq {
	/* command queue lock */
	spinlock_t cmdq_lock;
	/* response list lock */
	spinlock_t response_lock;
	/* backlog list lock */
	spinlock_t backlog_lock;

	/* request submitted to chip, in progress */
	struct list_head response_head;
	/* hw queue full, hold in backlog list */
	struct list_head backlog_head;

	/* doorbell address */
	u8 __iomem *dbell_csr_addr;
	/* base address of the queue */
	u8 *head;
	spinlock_t cmd_qlock;
	spinlock_t resp_qlock;
	spinlock_t backlog_qlock;

	struct nitrox_device *ndev;
	/* flush pending backlog commands */
	struct list_head response_head;
	struct list_head backlog_head;

	u8 __iomem *dbell_csr_addr;
	u8 __iomem *compl_cnt_csr_addr;
	u8 *base;
	dma_addr_t dma;

	struct work_struct backlog_qflush;

	/* requests posted waiting for completion */
	atomic_t pending_count;
	/* requests in backlog queues */
	atomic_t backlog_count;

	int write_idx;
	/* command size 32B/64B */
	u8 instr_size;
	u8 qno;
	u32 qsize;

	/* unaligned addresses */
	u8 *head_unaligned;
	dma_addr_t dma_unaligned;
	/* dma address of the base */
	dma_addr_t dma;
	u8 *unalign_base;
	dma_addr_t unalign_dma;
};

/**
 * struct nitrox_hw - NITROX hardware information
 * @partname: partname ex: CNN55xxx-xxx
 * @fw_name: firmware version
 * @freq: NITROX frequency
 * @vendor_id: vendor ID
 * @device_id: device ID
 * @revision_id: revision ID
 * @se_cores: number of symmetric cores
 * @ae_cores: number of asymmetric cores
 * @zip_cores: number of zip cores
 */
struct nitrox_hw {
	/* firmware version */
	char partname[IFNAMSIZ * 2];
	char fw_name[VERSION_LEN];

	int freq;
	u16 vendor_id;
	u16 device_id;
	u8 revision_id;

	/* CNN55XX cores */
	u8 se_cores;
	u8 ae_cores;
	u8 zip_cores;
};

#define MAX_MSIX_VECTOR_NAME	20
/**
 * vectors for queues (64 AE, 64 SE and 64 ZIP) and
 * error condition/mailbox.
struct nitrox_stats {
	atomic64_t posted;
	atomic64_t completed;
	atomic64_t dropped;
};

#define IRQ_NAMESZ	32

struct nitrox_q_vector {
	char name[IRQ_NAMESZ];
	bool valid;
	int ring;
	struct tasklet_struct resp_tasklet;
	union {
		struct nitrox_cmdq *cmdq;
		struct nitrox_device *ndev;
	};
};

/*
 * NITROX Device states
 */
#define MAX_MSIX_VECTORS	192

struct nitrox_msix {
	struct msix_entry *entries;
	char **names;
	DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
	u32 nr_entries;
enum ndev_state {
	__NDEV_NOT_READY,
	__NDEV_READY,
	__NDEV_IN_RESET,
};

struct bh_data {
	/* slc port completion count address */
	u8 __iomem *completion_cnt_csr_addr;

	struct nitrox_cmdq *cmdq;
	struct tasklet_struct resp_handler;
/* NITROX support modes for VF(s) */
enum vf_mode {
	__NDEV_MODE_PF,
	__NDEV_MODE_VF16,
	__NDEV_MODE_VF32,
	__NDEV_MODE_VF64,
	__NDEV_MODE_VF128,
};

struct nitrox_bh {
	struct bh_data *slc;
};

/* NITROX-V driver state */
#define NITROX_UCODE_LOADED	0
#define NITROX_READY		1
#define __NDEV_SRIOV_BIT	0

/* command queue size */
#define DEFAULT_CMD_QLEN	2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
#define CMD_TIMEOUT 2000

#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
#define PF_MODE 0

#define NITROX_CSR_ADDR(ndev, offset) \
	((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@
 * @list: pointer to linked list of devices
 * @bar_addr: iomap address
 * @pdev: PCI device information
 * @status: NITROX status
 * @state: NITROX device state
 * @flags: flags to indicate device features
 * @timeout: Request timeout in jiffies
 * @refcnt: Device usage count
 * @idx: device index (0..N)
 * @node: NUMA node id attached
 * @qlen: Command queue length
 * @nr_queues: Number of command queues
 * @mode: Device mode PF/VF
 * @ctx_pool: DMA pool for crypto context
 * @pkt_cmdqs: SE Command queues
 * @msix: MSI-X information
 * @bh: post processing work
 * @pkt_inq: Packet input rings
 * @qvec: MSI-X queue vectors information
 * @hw: hardware information
 * @debugfs_dir: debugfs directory
 */
@@ -128,7 +159,8 @@ struct nitrox_device {
	u8 __iomem *bar_addr;
	struct pci_dev *pdev;

	unsigned long status;
	atomic_t state;
	unsigned long flags;
	unsigned long timeout;
	refcount_t refcnt;

@@ -136,13 +168,16 @@ struct nitrox_device {
	int node;
	u16 qlen;
	u16 nr_queues;
	int num_vfs;
	enum vf_mode mode;

	struct dma_pool *ctx_pool;
	struct nitrox_cmdq *pkt_cmdqs;
	struct nitrox_cmdq *pkt_inq;

	struct nitrox_msix msix;
	struct nitrox_bh bh;
	struct nitrox_q_vector *qvec;
	int num_vecs;

	struct nitrox_stats stats;
	struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
	writeq(value, (ndev->bar_addr + offset));
}

static inline int nitrox_ready(struct nitrox_device *ndev)
static inline bool nitrox_ready(struct nitrox_device *ndev)
{
	return test_bit(NITROX_READY, &ndev->status);
	return atomic_read(&ndev->state) == __NDEV_READY;
}

#ifdef CONFIG_DEBUG_FS
int nitrox_debugfs_init(struct nitrox_device *ndev);
void nitrox_debugfs_exit(struct nitrox_device *ndev);
#else
static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
{
	return 0;
}

static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
{ }
#endif

#endif /* __NITROX_DEV_H */
@@ -4,6 +4,8 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"

#define PLL_REF_CLK 50

/**
 * emu_enable_cores - Enable EMU cluster cores.
 * @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		u64 offset;

@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
	/* disable ILK interface */
	core_gbl_vfcfg.value = 0;
	core_gbl_vfcfg.s.ilk_disable = 1;
	core_gbl_vfcfg.s.cfg = PF_MODE;
	core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
	/* config input and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}

void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
{
	union nps_core_gbl_vfcfg vfcfg;

	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
	vfcfg.s.cfg = mode & 0x7;

	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}

void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
	union emu_fuse_map emu_fuse;
	union rst_boot rst_boot;
	union fus_dat1 fus_dat1;
	unsigned char name[IFNAMSIZ * 2] = {};
	int i, dead_cores;
	u64 offset;

	/* get core frequency */
	offset = RST_BOOT;
	rst_boot.value = nitrox_read_csr(ndev, offset);
	ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_FUSE_MAPX(i);
		emu_fuse.value = nitrox_read_csr(ndev, offset);
		if (emu_fuse.s.valid) {
			dead_cores = hweight32(emu_fuse.s.ae_fuse);
			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
			dead_cores = hweight16(emu_fuse.s.se_fuse);
			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
		}
	}
	/* find zip hardware availability */
	offset = FUS_DAT1;
	fus_dat1.value = nitrox_read_csr(ndev, offset);
	if (!fus_dat1.nozip) {
		dead_cores = hweight8(fus_dat1.zip_info);
		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
	}

	/* determine the partname CNN55<cores>-<freq><pincount>-<rev> */
	if (ndev->hw.ae_cores == AE_MAX_CORES) {
		switch (ndev->hw.se_cores) {
		case SE_MAX_CORES:
			i = snprintf(name, sizeof(name), "CNN5560");
			break;
		case 40:
			i = snprintf(name, sizeof(name), "CNN5560s");
			break;
		}
	} else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
		i = snprintf(name, sizeof(name), "CNN5530");
	} else {
		i = snprintf(name, sizeof(name), "CNN5560i");
	}

	snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
		 ndev->hw.freq, ndev->hw.revision_id);

	/* copy partname */
	strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}
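The fuse handling above is a population count: each set bit in a fuse field marks a dead core, so the usable cores per cluster are the cluster maximum minus the bit count (hweight32/hweight16 in kernel terms). A minimal stand-alone sketch of that arithmetic, using the GCC/Clang popcount builtin and an assumed per-cluster maximum (the real constant lives in the driver headers):

#include <stdio.h>

/* stand-in for the driver's per-cluster maximum (assumed value) */
#define AE_CORES_PER_CLUSTER 20

/* usable cores = per-cluster maximum minus fused-off (dead) cores */
static int usable_ae_cores(unsigned int ae_fuse)
{
	return AE_CORES_PER_CLUSTER - __builtin_popcount(ae_fuse);
}

int main(void)
{
	/* 0x5 has two bits set, so two cores in the cluster are dead */
	printf("%d usable AE cores\n", usable_ae_cores(0x5));
	return 0;
}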
drivers/crypto/cavium/nitrox/nitrox_hal.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_HAL_H
#define __NITROX_HAL_H

#include "nitrox_dev.h"

void nitrox_config_emu_unit(struct nitrox_device *ndev);
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
void nitrox_config_nps_unit(struct nitrox_device *ndev);
void nitrox_config_pom_unit(struct nitrox_device *ndev);
void nitrox_config_rand_unit(struct nitrox_device *ndev);
void nitrox_config_efl_unit(struct nitrox_device *ndev);
void nitrox_config_bmi_unit(struct nitrox_device *ndev);
void nitrox_config_bmo_unit(struct nitrox_device *ndev);
void nitrox_config_lbc_unit(struct nitrox_device *ndev);
void invalidate_lbc(struct nitrox_device *ndev);
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
void nitrox_get_hwinfo(struct nitrox_device *ndev);

#endif /* __NITROX_HAL_H */
@@ -6,9 +6,16 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"

/**
 * One vector for each type of ring
 * - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NPS_CORE_INT_ACTIVE_ENTRY 192
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct bh_data *slc = data;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

	pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (pkt_slc_cnts.s.slc_int)
		tasklet_hi_schedule(&slc->resp_handler);
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}
@@ -190,56 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
	dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}

/**
 * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
 * @ndev: NITROX device
 */
static void clear_nps_core_int_active(struct nitrox_device *ndev)
static void nps_core_int_tasklet(unsigned long data)
{
	union nps_core_int_active core_int_active;
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

	if (core_int_active.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int_active.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int_active.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int_active.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int_active.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int_active.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int_active.s.bmi)
		clear_bmi_err_intr(ndev);

	/* If more work callback the ISR, set resend */
	core_int_active.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
	/* if pf mode do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
	} else {
		/**
		 * if VF(s) enabled communicate the error information
		 * to VF(s)
		 */
	}
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 * mailbox communication
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_device *ndev = data;
	union nps_core_int_active core_int;

	clear_nps_core_int_active(ndev);
	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* If more work callback the ISR, set resend */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}

static int nitrox_enable_msix(struct nitrox_device *ndev)
void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct msix_entry *entries;
	char **names;
	int i, nr_entries, ret;
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
@@ -253,216 +297,71 @@ static int nitrox_enable_msix(struct nitrox_device *ndev)
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
	entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
			       GFP_KERNEL, ndev->node);
	if (!entries)
		return -ENOMEM;
	nr_vecs = pci_msix_vec_count(pdev);

	names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}

	/* fill entries */
	for (i = 0; i < (nr_entries - 1); i++)
		entries[i].entry = i;

	entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		*(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!(*(names + i))) {
			ret = -ENOMEM;
			goto msix_fail;
		}
	}
	ndev->msix.entries = entries;
	ndev->msix.names = names;
	ndev->msix.nr_entries = nr_entries;

	ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
				    ndev->msix.nr_entries);
	if (ret) {
		dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
			ret);
		goto msix_fail;
	}
	return 0;

msix_fail:
	for (i = 0; i < nr_entries; i++)
		kfree(*(names + i));

	kfree(entries);
	kfree(names);
	return ret;
}

static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
{
	int i;

	if (!ndev->bh.slc)
		return;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct bh_data *bh = &ndev->bh.slc[i];

		tasklet_disable(&bh->resp_handler);
		tasklet_kill(&bh->resp_handler);
	}
	kfree(ndev->bh.slc);
	ndev->bh.slc = NULL;
}

static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
{
	u32 size;
	int i;

	size = ndev->nr_queues * sizeof(struct bh_data);
	ndev->bh.slc = kzalloc(size, GFP_KERNEL);
	if (!ndev->bh.slc)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct bh_data *bh = &ndev->bh.slc[i];
		u64 offset;

		offset = NPS_PKT_SLC_CNTSX(i);
		/* pre calculate completion count address */
		bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		bh->cmdq = &ndev->pkt_cmdqs[i];

		tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
			     (unsigned long)bh);
	}

	return 0;
}

static int nitrox_request_irqs(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct msix_entry *msix_ent = ndev->msix.entries;
	int nr_ring_vectors, i = 0, ring, cpu, ret;
	char *name;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;

	/* request irq for pkt ring/ports only */
	while (i < nr_ring_vectors) {
		name = *(ndev->msix.names + i);
		ring = (i / NR_RING_VECTORS);
		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
			 ndev->idx, ring);

		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
				  name, &ndev->bh.slc[ring]);
		if (ret) {
			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
				msix_ent[i].vector, name);
			return ret;
		}
		cpu = ring % num_online_cpus();
		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

		set_bit(i, ndev->msix.irqs);
		i += NR_RING_VECTORS;
	}

	/* Request IRQ for NPS_CORE_INT_ACTIVE */
	name = *(ndev->msix.names + i);
	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
			msix_ent[i].vector, name);
	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
		return ret;
	}
	set_bit(i, ndev->msix.irqs);
	ndev->num_vecs = nr_vecs;

	return 0;
}

static void nitrox_disable_msix(struct nitrox_device *ndev)
{
	struct msix_entry *msix_ent = ndev->msix.entries;
	char **names = ndev->msix.names;
	int i = 0, ring, nr_ring_vectors;

	nr_ring_vectors = ndev->msix.nr_entries - 1;

	/* clear pkt ring irqs */
	while (i < nr_ring_vectors) {
		if (test_and_clear_bit(i, ndev->msix.irqs)) {
			ring = (i / NR_RING_VECTORS);
			irq_set_affinity_hint(msix_ent[i].vector, NULL);
			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
		}
		i += NR_RING_VECTORS;
	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}
	irq_set_affinity_hint(msix_ent[i].vector, NULL);
	free_irq(msix_ent[i].vector, ndev);
	clear_bit(i, ndev->msix.irqs);

	kfree(ndev->msix.entries);
	for (i = 0; i < ndev->msix.nr_entries; i++)
		kfree(*(names + i));
	/* request irqs for packet rings/ports */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

	kfree(names);
	pci_disable_msix(ndev->pdev);
}
		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

/**
 * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
 * @ndev: NITROX device
 */
void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
{
	nitrox_disable_msix(ndev);
	nitrox_cleanup_pkt_slc_bh(ndev);
}
		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

/**
 * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative value on failure.
 */
int nitrox_pf_init_isr(struct nitrox_device *ndev)
{
	int err;
		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		qvec->valid = true;
	}

	err = nitrox_setup_pkt_slc_bh(ndev);
	if (err)
		return err;
	/* request irqs for non ring vectors */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];

	err = nitrox_enable_msix(ndev);
	if (err)
		goto msix_fail;

	err = nitrox_request_irqs(ndev);
	if (err)
	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->ndev = ndev;
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_disable_msix(ndev);
msix_fail:
	nitrox_cleanup_pkt_slc_bh(ndev);
	return err;
	nitrox_unregister_interrupts(ndev);
	return ret;
}
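The registration loop above steps through the vector table three entries at a time, so a packet-ring number can always be recovered from a vector index by integer division. A minimal stand-alone sketch of that mapping (hypothetical queue count; the constants mirror the ones in the hunk above):

#include <stdio.h>

#define NR_RING_VECTORS 3	/* one NPS packet, AQMQ and ZQMQ vector per ring */

int main(void)
{
	int nr_queues = 4;	/* hypothetical queue count */
	int i;

	/* ring r owns MSI-X entries r*3, r*3+1 and r*3+2 */
	for (i = 0; i < nr_queues * NR_RING_VECTORS; i += NR_RING_VECTORS)
		printf("vector %d -> packet ring %d\n", i, i / NR_RING_VECTORS);
	return 0;
}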
drivers/crypto/cavium/nitrox/nitrox_isr.h (new file, 10 lines)
@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_ISR_H
#define __NITROX_ISR_H

#include "nitrox_dev.h"

int nitrox_register_interrupts(struct nitrox_device *ndev);
void nitrox_unregister_interrupts(struct nitrox_device *ndev);

#endif /* __NITROX_ISR_H */
@@ -17,30 +17,27 @@

#define CRYPTO_CTX_SIZE 256

/* command queue alignments */
#define PKT_IN_ALIGN 16
/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16

static int cmdq_common_init(struct nitrox_cmdq *cmdq)
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;
	u32 qsize;

	qsize = (ndev->qlen) * cmdq->instr_size;
	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
						   (qsize + PKT_IN_ALIGN),
						   &cmdq->dma_unaligned,
						   GFP_KERNEL);
	if (!cmdq->head_unaligned)
	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
						 &cmdq->unalign_dma,
						 GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = (qsize + PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
	spin_lock_init(&cmdq->backlog_lock);
	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
	return 0;
}

static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
{
	cmdq->write_idx = 0;
	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
}

static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	if (!cmdq->unalign_base)
		return;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->head_unaligned, cmdq->dma_unaligned);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	cmdq->dbell_csr_addr = NULL;
	cmdq->head = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;
	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];

		cmdq_common_cleanup(cmdq);
		nitrox_cmdq_cleanup(cmdq);
	}
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
	kfree(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
}

static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err, size;
	int i, err;

	size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
	ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
	if (!ndev->pkt_cmdqs)
	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_cmdqs[i];
		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet input ring doorbell address */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		/* SE ring doorbell address for this queue */
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		/* packet solicit port completion count address */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = cmdq_common_init(cmdq);
		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pkt_cmdq_fail;
			goto pktq_fail;
	}
	return 0;

pkt_cmdq_fail:
	nitrox_cleanup_pkt_cmdqs(ndev);
pktq_fail:
	nitrox_free_pktin_queues(ndev);
	return err;
}

@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)

	/* Crypto context pool, 16 byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
	ndev->ctx_pool = dma_pool_create("nitrox-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return NULL;

@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
	if (err)
		return err;

	err = nitrox_init_pkt_cmdqs(ndev);
	err = nitrox_alloc_pktin_queues(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_cleanup_pkt_cmdqs(ndev);
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}
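The queue setup above over-allocates by align_bytes, rounds the DMA handle up to the alignment boundary, and then advances the CPU-side base pointer by exactly the same slack so both views of the ring stay in step. A stand-alone sketch of that arithmetic with hypothetical values:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* pretend the coherent allocator returned this (unaligned) address */
	uint64_t unalign_dma = 0x1000000cULL;
	unsigned int align = 16;

	uint64_t dma = ALIGN_UP(unalign_dma, align);
	uint64_t slack = dma - unalign_dma;	/* CPU base pointer advances by this */

	printf("dma 0x%llx, cpu base advances by %llu bytes\n",
	       (unsigned long long)dma, (unsigned long long)slack);
	return 0;
}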
@@ -11,13 +11,15 @@
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"

#define CNN55XX_DEV_ID 0x12
#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0

#define DRIVER_VERSION "1.0"
#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");

#ifdef CONFIG_PCI_IOV
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif

/**
 * struct ucode - Firmware Header
 * @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
	write_to_ucd_unit(ndev, ucode);
	release_firmware(fw);

	set_bit(NITROX_UCODE_LOADED, &ndev->status);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return 0;
}

@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
	smp_mb__after_atomic();
}

static int nitrox_reset_device(struct pci_dev *pdev)
static int nitrox_device_flr(struct pci_dev *pdev)
{
	int pos = 0;

@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
		return -ENOMEM;
	}

	pos = pci_pcie_cap(pdev);
	if (!pos)
		return -ENOTTY;
	/* check flr support */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	if (!pci_wait_for_pending_transaction(pdev))
		dev_err(&pdev->dev, "waiting for pending transaction\n");

	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
	msleep(100);
	pci_restore_state(pdev);

	return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
	if (err)
		return err;

	err = nitrox_pf_init_isr(ndev);
	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)

static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_pf_cleanup_isr(ndev);
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
	return 0;
}

static void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
	union emu_fuse_map emu_fuse;
	u64 offset;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		u8 dead_cores;

		offset = EMU_FUSE_MAPX(i);
		emu_fuse.value = nitrox_read_csr(ndev, offset);
		if (emu_fuse.s.valid) {
			dead_cores = hweight32(emu_fuse.s.ae_fuse);
			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
			dead_cores = hweight16(emu_fuse.s.se_fuse);
			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
		}
	}
}

static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
	return 0;
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
static int registers_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;
	u64 offset;

	/* NPS DMA stats */
	offset = NPS_STATS_PKT_DMA_RD_CNT;
	seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
		   nitrox_read_csr(ndev, offset));
	offset = NPS_STATS_PKT_DMA_WR_CNT;
	seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
		   nitrox_read_csr(ndev, offset));

	/* BMI/BMO stats */
	offset = BMI_NPS_PKT_CNT;
	seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
		   nitrox_read_csr(ndev, offset));
	offset = BMO_NPS_SLC_PKT_CNT;
	seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
		   nitrox_read_csr(ndev, offset));

	return 0;
}

static int registers_open(struct inode *inode, struct file *file)
{
	return single_open(file, registers_show, inode->i_private);
}

static const struct file_operations register_fops = {
	.owner = THIS_MODULE,
	.open = registers_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int firmware_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
	return 0;
}

static int firmware_open(struct inode *inode, struct file *file)
{
	return single_open(file, firmware_show, inode->i_private);
}

static const struct file_operations firmware_fops = {
	.owner = THIS_MODULE,
	.open = firmware_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nitrox_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
	seq_printf(s, "  Revision ID: 0x%0x\n", ndev->hw.revision_id);
	seq_printf(s, "  Cores [AE: %u SE: %u]\n",
		   ndev->hw.ae_cores, ndev->hw.se_cores);
	seq_printf(s, "  Number of Queues: %u\n", ndev->nr_queues);
	seq_printf(s, "  Queue length: %u\n", ndev->qlen);
	seq_printf(s, "  Node: %u\n", ndev->node);

	return 0;
}

static int nitrox_open(struct inode *inode, struct file *file)
{
	return single_open(file, nitrox_show, inode->i_private);
}

static const struct file_operations nitrox_fops = {
	.owner = THIS_MODULE,
	.open = nitrox_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
	ndev->debugfs_dir = NULL;
}

static int nitrox_debugfs_init(struct nitrox_device *ndev)
{
	struct dentry *dir, *f;

	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!dir)
		return -ENOMEM;

	ndev->debugfs_dir = dir;
	f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
	if (!f)
		goto err;
	f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
	if (!f)
		goto err;
	f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
	if (!f)
		goto err;

	return 0;

err:
	nitrox_debugfs_exit(ndev);
	return -ENODEV;
}
#else
static int nitrox_debugfs_init(struct nitrox_device *ndev)
{
	return 0;
}

static void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
}
#endif

/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
		return err;

	/* do FLR */
	err = nitrox_reset_device(pdev);
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
	if (err)
		goto pf_hw_fail;

	set_bit(NITROX_READY, &ndev->status);
	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,

crypto_fail:
	nitrox_debugfs_exit(ndev);
	clear_bit(NITROX_READY, &ndev->status);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	clear_bit(NITROX_READY, &ndev->status);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
	.probe = nitrox_probe,
	.remove = nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};

module_pci_driver(nitrox_driver);
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
	spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
	spin_unlock_bh(&cmdq->resp_qlock);
}

static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);
	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->head + (idx * cmdq->instr_size);
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmdq_lock);
	spin_unlock_bh(&cmdq->cmd_qlock);

	/* increment the posted command count */
	atomic64_inc(&ndev->stats.posted);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_lock);
	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* increment drop count */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];
	sr->cmdq = &ndev->pkt_inq[qno];

	/*
	 * 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
				READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * pkt_slc_resp_tasklet - post processing of SE responses
 */
void pkt_slc_resp_handler(unsigned long data)
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
	/* order the writes */
	mmiowb();
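The write pointer above is a modular ring counter: after each command is copied into its 64-byte slot, the index advances by one and wraps at the queue length. A minimal stand-alone sketch of that wraparound (the in-driver helper is incr_index(); this version assumes plain modular arithmetic gives the same result for a step of one):

#include <stdio.h>

/* advance a ring index by count entries, wrapping at qlen */
static int incr_index(int index, int count, int qlen)
{
	return (index + count) % qlen;
}

int main(void)
{
	int qlen = 2048, idx = 2047;

	idx = incr_index(idx, 1, qlen);
	printf("next write slot: %d\n", idx);	/* wraps back to 0 */
	return 0;
}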
drivers/crypto/cavium/nitrox/nitrox_sriov.c (new file, 151 lines)
@@ -0,0 +1,151 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_hal.h"
#include "nitrox_common.h"
#include "nitrox_isr.h"

static inline bool num_vfs_valid(int num_vfs)
{
	bool valid = false;

	switch (num_vfs) {
	case 16:
	case 32:
	case 64:
	case 128:
		valid = true;
		break;
	}

	return valid;
}

static inline enum vf_mode num_vfs_to_mode(int num_vfs)
{
	enum vf_mode mode = 0;

	switch (num_vfs) {
	case 0:
		mode = __NDEV_MODE_PF;
		break;
	case 16:
		mode = __NDEV_MODE_VF16;
		break;
	case 32:
		mode = __NDEV_MODE_VF32;
		break;
	case 64:
		mode = __NDEV_MODE_VF64;
		break;
	case 128:
		mode = __NDEV_MODE_VF128;
		break;
	}

	return mode;
}

static void pf_sriov_cleanup(struct nitrox_device *ndev)
{
	/* PF has no queues in SR-IOV mode */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* unregister crypto algorithms */
	nitrox_crypto_unregister();

	/* cleanup PF resources */
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

static int pf_sriov_init(struct nitrox_device *ndev)
{
	int err;

	/* allocate resources for PF */
	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err) {
		nitrox_common_sw_cleanup(ndev);
		return err;
	}

	/* configure the packet queues */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* set device to ready state */
	atomic_set(&ndev->state, __NDEV_READY);

	/* register crypto algorithms */
	return nitrox_crypto_register();
}

static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs_valid(num_vfs)) {
		dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
		return -EINVAL;
	}

	if (pci_num_vf(pdev) == num_vfs)
		return num_vfs;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
		return err;
	}
	dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);

	ndev->num_vfs = num_vfs;
	ndev->mode = num_vfs_to_mode(num_vfs);
	/* set bit in flags */
	set_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	/* cleanup PF resources */
	pf_sriov_cleanup(ndev);

	config_nps_core_vfcfg_mode(ndev, ndev->mode);

	return num_vfs;
}

static int nitrox_sriov_disable(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
		return 0;

	if (pci_vfs_assigned(pdev)) {
		dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
		return -EPERM;
	}
	pci_disable_sriov(pdev);
	/* clear bit in flags */
	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	ndev->num_vfs = 0;
	ndev->mode = __NDEV_MODE_PF;

	config_nps_core_vfcfg_mode(ndev, ndev->mode);

	return pf_sriov_init(ndev);
}

int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (!num_vfs)
		return nitrox_sriov_disable(pdev);

	return nitrox_sriov_enable(pdev, num_vfs);
}
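With .sriov_configure wired into the pci_driver, the VF count is driven from user space through the standard PCI sysfs knob (e.g. writing 64 to sriov_numvfs under the device's sysfs node, a generic PCI mechanism rather than anything driver-specific), and the driver accepts only the four supported ring splits. A stand-alone sketch of that validation step, mirroring num_vfs_valid() above:

#include <stdbool.h>
#include <stdio.h>

/* the device can split its rings only 16/32/64/128 ways */
static bool num_vfs_valid(int num_vfs)
{
	switch (num_vfs) {
	case 16:
	case 32:
	case 64:
	case 128:
		return true;
	}
	return false;
}

int main(void)
{
	printf("64 VFs valid: %d, 48 VFs valid: %d\n",
	       num_vfs_valid(64), num_vfs_valid(48));
	return 0;
}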
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
	return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
	    (ctx->u.aes.key_len != AES_KEYSIZE_256))
		fallback = 1;
	if (fallback) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
					       ctx->u.aes.tfm_skcipher);

		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *fallback_tfm;
	struct crypto_sync_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
	fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
	crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}

static int ccp_register_aes_xts_alg(struct list_head *head,
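The ccp, chcr and mxs-dcp conversions in this pull all follow the same pattern introduced with the sync-skcipher API: allocate the software fallback as a crypto_sync_skcipher, place the request on the stack with the SYNC_ variant of the macro, and bind it with skcipher_request_set_sync_tfm(). A condensed kernel-context sketch of the pattern, with error handling trimmed and hypothetical function names:

#include <linux/err.h>
#include <crypto/skcipher.h>

static struct crypto_sync_skcipher *fallback;

static int example_init(void)
{
	fallback = crypto_alloc_sync_skcipher("cbc(aes)", 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(fallback);
}

static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes, u8 *iv)
{
	/* on-stack request sized for the (always synchronous) fallback */
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, 0, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
	return crypto_skcipher_encrypt(subreq);
}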
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
	/* Fallback cipher for XTS with unsupported unit sizes */
	struct crypto_skcipher *tfm_skcipher;
	struct crypto_sync_skcipher *tfm_skcipher;

	/* Cipher used to generate CMAC K1/K2 keys */
	struct crypto_cipher *tfm_cipher;
@@ -31,8 +31,9 @@
		((psp_master->api_major) >= _maj &&	\
		 (psp_master->api_minor) >= _min)

#define DEVICE_NAME "sev"
#define SEV_FW_FILE "amd/sev.fw"
#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -423,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
	struct sev_user_data_status *status;
	int error, ret;
	int error = 0, ret;

	status = &psp_master->status_cmd_buf;
	ret = sev_platform_status(status, &error);
@@ -440,6 +441,41 @@ static int sev_get_api_version(void)
	return 0;
}

static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall-back to using generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
@@ -449,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
	struct page *p;
	u64 data_size;

	ret = request_firmware(&firmware, SEV_FW_FILE, dev);
	if (ret < 0)
	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
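The two snprintf() calls above derive the candidate firmware names from the CPU family and model, with the model's high nibble selecting the subset file. A stand-alone sketch of that derivation with hypothetical family/model values:

#include <stdio.h>

int main(void)
{
	unsigned int family = 0x17, model = 0x08;
	char specific[64], subset[64];

	snprintf(specific, sizeof(specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin", family, model);
	/* high nibble only: models 00h..0fh share one subset file */
	snprintf(subset, sizeof(subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 family, (model & 0xf0) >> 4);

	/* prints ...fam17h_model08h.sbin and ...fam17h_model0xh.sbin */
	printf("%s\n%s\n", specific, subset);
	return 0;
}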
@@ -33,8 +33,31 @@ struct sp_platform {
	unsigned int irq_count;
};

static const struct acpi_device_id sp_acpi_match[];
static const struct of_device_id sp_of_match[];
static const struct sp_dev_vdata dev_vdata[] = {
	{
		.bar = 0,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv3_platform,
#endif
	},
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
	{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
	{ .compatible = "amd,ccp-seattle-v1a",
	  .data = (const void *)&dev_vdata[0] },
	{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
#endif

static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
}
#endif

static const struct sp_dev_vdata dev_vdata[] = {
	{
		.bar = 0,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv3_platform,
#endif
	},
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
	{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
	{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
	{ .compatible = "amd,ccp-seattle-v1a",
	  .data = (const void *)&dev_vdata[0] },
	{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
#endif

static struct platform_driver sp_platform_driver = {
	.driver = {
		.name = "ccp",
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
 * @pdesc: pointer HW descriptor struct
 * @mode: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
				   enum drv_cipher_mode mode)
static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
 * @pdesc: pointer HW descriptor struct
 * @mode: Any one of the modes defined in [CC7x-DESC]
 */
static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
				      enum drv_crypto_direction mode)
static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
@@ -673,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
	return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
@@ -683,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);
@@ -856,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
@@ -1337,8 +1338,7 @@ static int chcr_device_init(struct chcr_context *ctx)
	}
	ctx->dev = u_ctx->dev;
	adap = padap(ctx->dev);
	ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
			    adap->vres.ncrypto_fc);
	ntxq = u_ctx->lldi.ntxq;
	rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
	txq_perchan = ntxq / u_ctx->lldi.nchan;
	spin_lock(&ctx->dev->lock_chcr_dev);
@@ -1369,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
@@ -1399,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
	crypto_free_sync_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file*/
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}

struct ablk_ctx {
	struct crypto_skcipher *sw_cipher;
	struct crypto_sync_skcipher *sw_cipher;
	struct crypto_cipher *aes_generic;
	__be32 key_ctx_hdr;
	unsigned int enckey_len;
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)

	return;
out:
	if (skb)
		kfree_skb(skb);
	kfree_skb(skb);
}

static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)

int chtls_disconnect(struct sock *sk, int flags)
{
	struct chtls_sock *csk;
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	const struct tcphdr *tcph;
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
	if (!dst)
		goto free_sk;

	tcph = (struct tcphdr *)(iph + 1);
	n = dst_neigh_lookup(dst, &iph->saddr);
	if (!n)
		goto free_sk;
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
 	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
 		kfree_skb(cdev->rspq_skb_cache[i]);
 	kfree(cdev->lldi);
-	if (cdev->askb)
-		kfree_skb(cdev->askb);
+	kfree_skb(cdev->askb);
 	kfree(cdev);
 }
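Both kfree_skb() hunks above make the same point: kfree_skb() already returns immediately when passed NULL, so guarding the call with "if (skb)" is redundant. Reduced illustration, with a hypothetical helper name:

#include <linux/skbuff.h>

static void sample_free_askb(struct sk_buff *askb)
{
	/* kfree_skb(NULL) is a no-op, so no NULL check is needed. */
	kfree_skb(askb);
}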
@@ -28,9 +28,24 @@

 #define DCP_MAX_CHANS	4
 #define DCP_BUF_SZ	PAGE_SIZE
+#define DCP_SHA_PAY_SZ	64

 #define DCP_ALIGNMENT	64

+/*
+ * Null hashes to align with hw behavior on imx6sl and ull
+ * these are flipped for consistency with hw output
+ */
+static const uint8_t sha1_null_hash[] =
+	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
 /* DCP DMA descriptor. */
 struct dcp_dma_desc {
 	uint32_t next_cmd_addr;
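The two tables just added are the standard empty-message digests stored byte-reversed: the DCP emits digests in reversed byte order and the driver un-flips results on completion, so the canned null hashes must be pre-flipped to match. Assuming that reading, sha1_null_hash reversed is exactly SHA-1("") (da39a3ee...0709); a check in the same spirit:

#include <linux/types.h>

/* SHA-1("") in its conventional byte order. */
static const uint8_t sha1_empty[20] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
	0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
};

/* Returns true iff sha1_null_hash is sha1_empty with its bytes reversed. */
static bool sha1_null_hash_is_flipped(void)
{
	int i;

	for (i = 0; i < 20; i++)
		if (sha1_null_hash[i] != sha1_empty[20 - i - 1])
			return false;
	return true;
}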
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
 	uint8_t			aes_in_buf[DCP_BUF_SZ];
 	uint8_t			aes_out_buf[DCP_BUF_SZ];
 	uint8_t			sha_in_buf[DCP_BUF_SZ];
+	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

 	uint8_t			aes_key[2 * AES_KEYSIZE_128];

@@ -84,7 +100,7 @@ struct dcp_async_ctx {
 	unsigned int			hot:1;

 	/* Crypto-specific context */
-	struct crypto_skcipher		*fallback;
+	struct crypto_sync_skcipher	*fallback;
 	unsigned int			key_len;
 	uint8_t				key[AES_KEYSIZE_128];
 };
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
 	unsigned int	fini:1;
 };

+struct dcp_export_state {
+	struct dcp_sha_req_ctx	req_ctx;
+	struct dcp_async_ctx	async_ctx;
+};
+
 /*
  * There can even be only one instance of the MXS DCP due to the
  * design of Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 					     DCP_BUF_SZ, DMA_FROM_DEVICE);

+	if (actx->fill % AES_BLOCK_SIZE) {
+		dev_err(sdcp->dev, "Invalid block size!\n");
+		ret = -EINVAL;
+		goto aes_done_run;
+	}
+
 	/* Fill in the DMA descriptor. */
 	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 			 MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,

 	ret = mxs_dcp_start_dma(actx);

+aes_done_run:
 	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 			 DMA_TO_DEVICE);
 	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)

 	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
 	uint32_t dst_off = 0;
+	uint32_t last_out_len = 0;

 	uint8_t *key = sdcp->coh->aes_key;

 	int ret = 0;
 	int split = 0;
-	unsigned int i, len, clen, rem = 0;
+	unsigned int i, len, clen, rem = 0, tlen = 0;
 	int init = 0;
+	bool limit_hit = false;

 	actx->fill = 0;

@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 	for_each_sg(req->src, src, nents, i) {
 		src_buf = sg_virt(src);
 		len = sg_dma_len(src);
+		tlen += len;
+		limit_hit = tlen > req->nbytes;
+
+		if (limit_hit)
+			len = req->nbytes - (tlen - len);

 		do {
 			if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 			 * If we filled the buffer or this is the last SG,
 			 * submit the buffer.
 			 */
-			if (actx->fill == out_off || sg_is_last(src)) {
+			if (actx->fill == out_off || sg_is_last(src) ||
+			    limit_hit) {
 				ret = mxs_dcp_run_aes(actx, req, init);
 				if (ret)
 					return ret;
 				init = 0;

 				out_tmp = out_buf;
+				last_out_len = actx->fill;
 				while (dst && actx->fill) {
 					if (!split) {
 						dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 				}
 			}
 		} while (len);
+
+		if (limit_hit)
+			break;
 	}
+
+	/* Copy the IV for CBC for chaining */
+	if (!rctx->ecb) {
+		if (rctx->enc)
+			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+			       AES_BLOCK_SIZE);
+		else
+			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+			       AES_BLOCK_SIZE);
+	}

 	return ret;
@@ -380,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 	int ret;

-	skcipher_request_set_tfm(subreq, ctx->fallback);
+	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst,
 				   req->nbytes, req->info);
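mxs_dcp_block_fallback() above shows why SYNC_SKCIPHER_REQUEST_ON_STACK() exists: a sync tfm has a known, bounded request size, so the on-stack request is a fixed-size object rather than the variable-length array that SKCIPHER_REQUEST_ON_STACK required. A condensed, hypothetical helper built from the same calls:

#include <crypto/skcipher.h>

static int sample_fallback_crypt(struct crypto_sync_skcipher *fallback,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int nbytes, void *iv, bool enc)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, 0, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
	ret = enc ? crypto_skcipher_encrypt(subreq)
		  : crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe keys/IV material from stack */
	return ret;
}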
@@ -464,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	 * but is supported by in-kernel software implementation, we use
 	 * software fallback.
 	 */
-	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(actx->fallback,
+	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

-	ret = crypto_skcipher_setkey(actx->fallback, key, len);
+	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
 	if (!ret)
 		return 0;

 	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
 			       CRYPTO_TFM_RES_MASK;

 	return ret;
@@ -482,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 {
 	const char *name = crypto_tfm_alg_name(tfm);
-	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
-	struct crypto_skcipher *blk;
+	struct crypto_sync_skcipher *blk;

-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);

@@ -499,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
 {
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

-	crypto_free_skcipher(actx->fallback);
+	crypto_free_sync_skcipher(actx->fallback);
 }

 /*
@@ -513,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
-	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

 	dma_addr_t digest_phys = 0;
@@ -536,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	desc->payload = 0;
 	desc->status = 0;

+	/*
+	 * Align driver with hw behavior when generating null hashes
+	 */
+	if (rctx->init && rctx->fini && desc->size == 0) {
+		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+		const uint8_t *sha_buf =
+			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+			sha1_null_hash : sha256_null_hash;
+		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+		ret = 0;
+		goto done_run;
+	}
+
 	/* Set HASH_TERM bit for last transfer block. */
 	if (rctx->fini) {
-		digest_phys = dma_map_single(sdcp->dev, req->result,
-					     halg->digestsize, DMA_FROM_DEVICE);
+		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 		desc->payload = digest_phys;
 	}
@@ -547,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
 	ret = mxs_dcp_start_dma(actx);

 	if (rctx->fini)
-		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 				 DMA_FROM_DEVICE);

+done_run:
 	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

 	return ret;
@@ -567,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 	const int nents = sg_nents(req->src);

 	uint8_t *in_buf = sdcp->coh->sha_in_buf;
+	uint8_t *out_buf = sdcp->coh->sha_out_buf;

 	uint8_t *src_buf;

@@ -621,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)

 		actx->fill = 0;

-		/* For some reason, the result is flipped. */
-		for (i = 0; i < halg->digestsize / 2; i++) {
-			swap(req->result[i],
-			     req->result[halg->digestsize - i - 1]);
-		}
+		/* For some reason the result is flipped */
+		for (i = 0; i < halg->digestsize; i++)
+			req->result[i] = out_buf[halg->digestsize - i - 1];
 	}

 	return 0;
@@ -766,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
 	return dcp_sha_finup(req);
 }

-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+	const struct dcp_export_state *export = in;
+
+	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+	memset(actx, 0, sizeof(struct dcp_async_ctx));
+	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }

-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
 {
-	return -ENOSYS;
+	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+	struct dcp_export_state *export = out;
+
+	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+	return 0;
 }
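With real dcp_sha_import()/dcp_sha_export() in place (and .statesize set to sizeof(struct dcp_export_state) below), a partial DCP hash can now be saved and resumed through the generic ahash interface. A hypothetical caller-side round trip:

#include <crypto/hash.h>

static int sample_save_and_resume(struct ahash_request *req_a,
				  struct ahash_request *req_b)
{
	struct dcp_export_state state;
	int ret;

	ret = crypto_ahash_export(req_a, &state);	/* calls dcp_sha_export() */
	if (ret)
		return ret;
	return crypto_ahash_import(req_b, &state);	/* calls dcp_sha_import() */
}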
 static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -846,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import	= dcp_sha_noimport,
-	.export	= dcp_sha_noexport,
+	.import	= dcp_sha_import,
+	.export	= dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha1",
 			.cra_driver_name	= "sha1-dcp",
@@ -872,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
 	.final	= dcp_sha_final,
 	.finup	= dcp_sha_finup,
 	.digest	= dcp_sha_digest,
-	.import	= dcp_sha_noimport,
-	.export	= dcp_sha_noexport,
+	.import	= dcp_sha_import,
+	.export	= dcp_sha_export,
 	.halg	= {
 		.digestsize	= SHA256_DIGEST_SIZE,
+		.statesize	= sizeof(struct dcp_export_state),
 		.base		= {
 			.cra_name		= "sha256",
 			.cra_driver_name	= "sha256-dcp",
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 				  !!(mode & FLAGS_CBC));

 	if (req->nbytes < aes_fallback_sz) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags, NULL,
 					      NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;

-	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 						 CRYPTO_TFM_REQ_MASK);

-	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 	if (!ret)
 		return 0;

@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
 static int omap_aes_cra_init(struct crypto_tfm *tfm)
 {
 	const char *name = crypto_tfm_alg_name(tfm);
-	const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_skcipher *blk;
+	struct crypto_sync_skcipher *blk;

-	blk = crypto_alloc_skcipher(name, 0, flags);
+	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);

@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
 	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);

 	if (ctx->fallback)
-		crypto_free_skcipher(ctx->fallback);
+		crypto_free_sync_skcipher(ctx->fallback);

 	ctx->fallback = NULL;
 }
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
 	int		keylen;
 	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
 	u8		nonce[4];
-	struct crypto_skcipher	*fallback;
+	struct crypto_sync_skcipher	*fallback;
 	struct crypto_skcipher	*ctr;
 };
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
 	 * The fallback cipher. If the operation can't be done in hardware,
 	 * fallback to a software version.
 	 */
-	struct crypto_skcipher		*sw_cipher;
+	struct crypto_sync_skcipher	*sw_cipher;
 };

 /* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 		 * Set the fallback transform to use the same request flags as
 		 * the hardware transform.
 		 */
-		crypto_skcipher_clear_flags(ctx->sw_cipher,
+		crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
 					    CRYPTO_TFM_REQ_MASK);
-		crypto_skcipher_set_flags(ctx->sw_cipher,
+		crypto_sync_skcipher_set_flags(ctx->sw_cipher,
 					  cipher->base.crt_flags &
 					  CRYPTO_TFM_REQ_MASK);

-		err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+		err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);

 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 		tfm->crt_flags |=
-			crypto_skcipher_get_flags(ctx->sw_cipher) &
+			crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
 			CRYPTO_TFM_RES_MASK;

 		if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
 	struct crypto_tfm *old_tfm =
 		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
-	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
 	int err;

 	/*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
 	 * the ciphering has completed, put the old transform back into the
 	 * request.
 	 */
-	skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+	skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
 	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
 	skcipher_request_set_crypt(subreq, req->src, req->dst,
 				   req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
 	ctx->generic.flags = spacc_alg->type;
 	ctx->generic.engine = engine;
 	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
-		ctx->sw_cipher = crypto_alloc_skcipher(
-			alg->cra_name, 0, CRYPTO_ALG_ASYNC |
-					  CRYPTO_ALG_NEED_FALLBACK);
+		ctx->sw_cipher = crypto_alloc_sync_skcipher(
+			alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
 		if (IS_ERR(ctx->sw_cipher)) {
 			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
 				 alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
 {
 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

-	crypto_free_skcipher(ctx->sw_cipher);
+	crypto_free_sync_skcipher(ctx->sw_cipher);
 }

 static int spacc_ablk_encrypt(struct ablkcipher_request *req)
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
 	struct crypto_shash *hash_tfm;
 	enum icp_qat_hw_auth_algo qat_hash_alg;
 	struct qat_crypto_instance *inst;
+	union {
+		struct sha1_state sha1;
+		struct sha256_state sha256;
+		struct sha512_state sha512;
+	};
+	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+	char opad[SHA512_BLOCK_SIZE];
 };

 struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 				  unsigned int auth_keylen)
 {
 	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
-	struct sha1_state sha1;
-	struct sha256_state sha256;
-	struct sha512_state sha512;
 	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
 	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
-	char ipad[block_size];
-	char opad[block_size];
 	__be32 *hash_state_out;
 	__be64 *hash512_state_out;
 	int i, offset;

-	memset(ipad, 0, block_size);
-	memset(opad, 0, block_size);
+	memset(ctx->ipad, 0, block_size);
+	memset(ctx->opad, 0, block_size);
 	shash->tfm = ctx->hash_tfm;
 	shash->flags = 0x0;

 	if (auth_keylen > block_size) {
 		int ret = crypto_shash_digest(shash, auth_key,
-					      auth_keylen, ipad);
+					      auth_keylen, ctx->ipad);
 		if (ret)
 			return ret;

-		memcpy(opad, ipad, digest_size);
+		memcpy(ctx->opad, ctx->ipad, digest_size);
 	} else {
-		memcpy(ipad, auth_key, auth_keylen);
-		memcpy(opad, auth_key, auth_keylen);
+		memcpy(ctx->ipad, auth_key, auth_keylen);
+		memcpy(ctx->opad, auth_key, auth_keylen);
 	}

 	for (i = 0; i < block_size; i++) {
-		char *ipad_ptr = ipad + i;
-		char *opad_ptr = opad + i;
+		char *ipad_ptr = ctx->ipad + i;
+		char *opad_ptr = ctx->opad + i;
 		*ipad_ptr ^= HMAC_IPAD_VALUE;
 		*opad_ptr ^= HMAC_OPAD_VALUE;
 	}
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	if (crypto_shash_init(shash))
 		return -EFAULT;

-	if (crypto_shash_update(shash, ipad, block_size))
+	if (crypto_shash_update(shash, ctx->ipad, block_size))
 		return -EFAULT;

 	hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,

 	switch (ctx->qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
-		if (crypto_shash_export(shash, &sha1))
+		if (crypto_shash_export(shash, &ctx->sha1))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
-		if (crypto_shash_export(shash, &sha256))
+		if (crypto_shash_export(shash, &ctx->sha256))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
-		if (crypto_shash_export(shash, &sha512))
+		if (crypto_shash_export(shash, &ctx->sha512))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
 		break;
 	default:
 		return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	if (crypto_shash_init(shash))
 		return -EFAULT;

-	if (crypto_shash_update(shash, opad, block_size))
+	if (crypto_shash_update(shash, ctx->opad, block_size))
 		return -EFAULT;

 	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,

 	switch (ctx->qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
-		if (crypto_shash_export(shash, &sha1))
+		if (crypto_shash_export(shash, &ctx->sha1))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
-		if (crypto_shash_export(shash, &sha256))
+		if (crypto_shash_export(shash, &ctx->sha256))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
-		if (crypto_shash_export(shash, &sha512))
+		if (crypto_shash_export(shash, &ctx->sha512))
 			return -EFAULT;
 		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
 		break;
 	default:
 		return -EFAULT;
 	}
-	memzero_explicit(ipad, block_size);
-	memzero_explicit(opad, block_size);
+	memzero_explicit(ctx->ipad, block_size);
+	memzero_explicit(ctx->opad, block_size);
 	return 0;
 }
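The qat hunk above is this pull's VLA-removal idiom in miniature: "char ipad[block_size]" depends on a runtime value, so the pads (and the shash export states) move into the tfm context, statically sized for the largest supported algorithm — SHA-512's 128-byte block also covers SHA-1 and SHA-256. A reduced sketch with illustrative names:

#include <crypto/sha.h>
#include <linux/string.h>

struct sample_hmac_ctx {
	/* was: char ipad[block_size]; -- a variable-length array */
	char ipad[SHA512_BLOCK_SIZE];	/* fixed upper bound, no VLA */
	char opad[SHA512_BLOCK_SIZE];
};

static void sample_zero_pads(struct sample_hmac_ctx *ctx,
			     unsigned int block_size)
{
	/* block_size <= SHA512_BLOCK_SIZE for every supported hash */
	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
}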
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
 	memcpy(ctx->enc_key, key, keylen);
 	return 0;
 fallback:
-	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 	if (!ret)
 		ctx->enc_keylen = keylen;
 	return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)

 	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
 	    ctx->enc_keylen != AES_KEYSIZE_256) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
 	memset(ctx, 0, sizeof(*ctx));
 	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

-	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
-					      CRYPTO_ALG_ASYNC |
-					      CRYPTO_ALG_NEED_FALLBACK);
+	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+						   0, CRYPTO_ALG_NEED_FALLBACK);
 	return PTR_ERR_OR_ZERO(ctx->fallback);
 }

@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
 {
 	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

-	crypto_free_skcipher(ctx->fallback);
+	crypto_free_sync_skcipher(ctx->fallback);
 }

 struct qce_ablkcipher_def {
@@ -22,7 +22,7 @@
 struct qce_cipher_ctx {
 	u8 enc_key[QCE_MAX_KEY_SIZE];
 	unsigned int enc_keylen;
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 };

 /**
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
 struct s5p_aes_ctx {
 	struct s5p_aes_dev	*dev;

-	uint8_t			aes_key[AES_MAX_KEY_SIZE];
-	uint8_t			nonce[CTR_RFC3686_NONCE_SIZE];
+	u8			aes_key[AES_MAX_KEY_SIZE];
+	u8			nonce[CTR_RFC3686_NONCE_SIZE];
 	int			keylen;
 };

@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
 }

 /* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_aes_complete(struct ablkcipher_request *req, int err)
 {
-	dev->req->base.complete(&dev->req->base, err);
+	req->base.complete(&req->base, err);
 }

 static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
 }

 static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
-			  struct scatterlist **dst)
+			   struct scatterlist **dst)
 {
 	void *pages;
 	int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,

 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
-	int err;
+	if (!sg->length)
+		return -EINVAL;

-	if (!sg->length) {
-		err = -EINVAL;
-		goto exit;
-	}
-
-	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
-	if (!err) {
-		err = -ENOMEM;
-		goto exit;
-	}
+	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
+		return -ENOMEM;

 	dev->sg_dst = sg;
-	err = 0;

-exit:
-	return err;
+	return 0;
 }

 static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
-	int err;
+	if (!sg->length)
+		return -EINVAL;

-	if (!sg->length) {
-		err = -EINVAL;
-		goto exit;
-	}
-
-	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
-	if (!err) {
-		err = -ENOMEM;
-		goto exit;
-	}
+	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
+		return -ENOMEM;

 	dev->sg_src = sg;
-	err = 0;

-exit:
-	return err;
+	return 0;
 }
 /*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = dev_id;
 	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+	struct ablkcipher_request *req;
 	int err_dma_tx = 0;
 	int err_dma_rx = 0;
 	int err_dma_hx = 0;
 	bool tx_end = false;
 	bool hx_end = false;
 	unsigned long flags;
-	uint32_t status;
-	u32 st_bits;
+	u32 status, st_bits;
 	int err;

 	spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)

 		spin_unlock_irqrestore(&dev->lock, flags);

-		s5p_aes_complete(dev, 0);
+		s5p_aes_complete(dev->req, 0);
 		/* Device is still busy */
 		tasklet_schedule(&dev->tasklet);
 	} else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 error:
 	s5p_sg_done(dev);
 	dev->busy = false;
+	req = dev->req;
 	if (err_dma_hx == 1)
 		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

 	spin_unlock_irqrestore(&dev->lock, flags);
-	s5p_aes_complete(dev, err);
+	s5p_aes_complete(req, err);

 hash_irq_end:
 	/*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
 };

 static void s5p_set_aes(struct s5p_aes_dev *dev,
-			const uint8_t *key, const uint8_t *iv,
+			const u8 *key, const u8 *iv, const u8 *ctr,
 			unsigned int keylen)
 {
 	void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
 	if (iv)
 		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);

+	if (ctr)
+		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+
 	if (keylen == AES_KEYSIZE_256)
 		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
 	else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
 }

 static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
-				struct ablkcipher_request *req)
+				 struct ablkcipher_request *req)
 {
 	struct scatterlist *sg;
 	int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
 	struct ablkcipher_request *req = dev->req;
-	uint32_t aes_control;
+	u32 aes_control;
 	unsigned long flags;
 	int err;
-	u8 *iv;
+	u8 *iv, *ctr;

+	/* This sets bit [13:12] to 00, which selects 128-bit counter */
 	aes_control = SSS_AES_KEY_CHANGE_MODE;
 	if (mode & FLAGS_AES_DECRYPT)
 		aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
 		aes_control |= SSS_AES_CHAIN_MODE_CBC;
 		iv = req->info;
+		ctr = NULL;
 	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
 		aes_control |= SSS_AES_CHAIN_MODE_CTR;
-		iv = req->info;
+		iv = NULL;
+		ctr = req->info;
 	} else {
 		iv = NULL; /* AES_ECB */
+		ctr = NULL;
 	}

 	if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 		goto outdata_error;

 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
-	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

 	s5p_set_dma_indata(dev, dev->sg_src);
 	s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
 	s5p_sg_done(dev);
 	dev->busy = false;
 	spin_unlock_irqrestore(&dev->lock, flags);
-	s5p_aes_complete(dev, err);
+	s5p_aes_complete(req, err);
 }

 static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
 	err = ablkcipher_enqueue_request(&dev->queue, req);
 	if (dev->busy) {
 		spin_unlock_irqrestore(&dev->lock, flags);
-		goto exit;
+		return err;
 	}
 	dev->busy = true;

@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,

 	tasklet_schedule(&dev->tasklet);

-exit:
 	return err;
 }

@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 	struct s5p_aes_dev *dev = ctx->dev;

-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
 		dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
 		return -EINVAL;
 	}
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 }

 static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
-			  const uint8_t *key, unsigned int keylen)
+			  const u8 *key, unsigned int keylen)
 {
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
 	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
 }

+static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+{
+	return s5p_aes_crypt(req, FLAGS_AES_CTR);
+}
+
 static int s5p_aes_cra_init(struct crypto_tfm *tfm)
 {
 	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
 			.decrypt	= s5p_aes_cbc_decrypt,
 		}
 	},
+	{
+		.cra_name		= "ctr(aes)",
+		.cra_driver_name	= "ctr-aes-s5p",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
+		.cra_alignmask		= 0x0f,
+		.cra_type		= &crypto_ablkcipher_type,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= s5p_aes_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey		= s5p_aes_setkey,
+			.encrypt	= s5p_aes_ctr_crypt,
+			.decrypt	= s5p_aes_ctr_crypt,
+		}
+	},
 };

 static int s5p_aes_probe(struct platform_device *pdev)
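The s5p-sss CTR additions above hinge on two details: the counter value goes to a dedicated SSS_REG_AES_CNT_DATA window instead of the IV registers, and the block-alignment check is relaxed because CTR is a stream mode. The relaxed predicate, reduced to its logic (FLAGS_AES_MODE_MASK and FLAGS_AES_CTR are the driver's own mode bits):

#include <crypto/aes.h>
#include <linux/kernel.h>

static bool sample_req_len_ok(unsigned int nbytes, unsigned long mode)
{
	/* CTR turns AES into a stream cipher: any length is acceptable. */
	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		return true;

	/* ECB/CBC still require whole 16-byte blocks. */
	return IS_ALIGNED(nbytes, AES_BLOCK_SIZE);
}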
@@ -149,7 +149,7 @@ struct sahara_ctx {
 	/* AES-specific context */
 	int keylen;
 	u8 key[AES_KEYSIZE_128];
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 };

 struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	/*
 	 * The requested key size is not supported by HW, do a fallback.
 	 */
-	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 						 CRYPTO_TFM_REQ_MASK);

-	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

 	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
 			       CRYPTO_TFM_RES_MASK;
 	return ret;
 }
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
 	int err;

 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
 	int err;

 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
 	int err;

 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 	int err;

 	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
-		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

-		skcipher_request_set_tfm(subreq, ctx->fallback);
+		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 	const char *name = crypto_tfm_alg_name(tfm);
 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

-	ctx->fallback = crypto_alloc_skcipher(name, 0,
-					      CRYPTO_ALG_ASYNC |
+	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
 					      CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ctx->fallback)) {
 		pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
 {
 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

-	crypto_free_skcipher(ctx->fallback);
+	crypto_free_sync_skcipher(ctx->fallback);
 }

 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
@@ -32,7 +32,7 @@
 #include "aesp8-ppc.h"

 struct p8_aes_cbc_ctx {
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct aes_key enc_key;
 	struct aes_key dec_key;
 };
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
 static int p8_aes_cbc_init(struct crypto_tfm *tfm)
 {
 	const char *alg = crypto_tfm_alg_name(tfm);
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

-	fallback = crypto_alloc_skcipher(alg, 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	fallback = crypto_alloc_sync_skcipher(alg, 0,
+					      CRYPTO_ALG_NEED_FALLBACK);

 	if (IS_ERR(fallback)) {
 		printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
 		return PTR_ERR(fallback);
 	}

-	crypto_skcipher_set_flags(
+	crypto_sync_skcipher_set_flags(
 		fallback,
 		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
 	ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
 	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

 	if (ctx->fallback) {
-		crypto_free_skcipher(ctx->fallback);
+		crypto_free_sync_skcipher(ctx->fallback);
 		ctx->fallback = NULL;
 	}
 }
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_enable();
 	preempt_enable();

-	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
 }

@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

 	if (in_interrupt()) {
-		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-		skcipher_request_set_tfm(req, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 		ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

 	if (in_interrupt()) {
-		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-		skcipher_request_set_tfm(req, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 		ret = crypto_skcipher_decrypt(req);
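All three VMX transforms (CBC above, CTR and XTS below) share the same dispatch: take the fast P8 assembly path when vector state can be used, and hand the request to the synchronous software fallback when running in interrupt context, where VSX cannot be enabled. A condensed, hypothetical version of that shape:

#include <crypto/skcipher.h>
#include <linux/hardirq.h>

static int sample_vmx_encrypt(struct crypto_sync_skcipher *fallback,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes, void *iv, u32 flags)
{
	int ret;

	if (in_interrupt()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, fallback);

		skcipher_request_set_sync_tfm(req, fallback);
		skcipher_request_set_callback(req, flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, iv);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		/*
		 * preempt_disable(); enable_kernel_vsx(); run the aesp8
		 * assembly; disable_kernel_vsx(); preempt_enable();
		 */
		ret = 0;	/* placeholder for the hardware path */
	}
	return ret;
}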
@@ -32,18 +32,18 @@
 #include "aesp8-ppc.h"

 struct p8_aes_ctr_ctx {
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct aes_key enc_key;
 };

 static int p8_aes_ctr_init(struct crypto_tfm *tfm)
 {
 	const char *alg = crypto_tfm_alg_name(tfm);
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

-	fallback = crypto_alloc_skcipher(alg, 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	fallback = crypto_alloc_sync_skcipher(alg, 0,
+					      CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback)) {
 		printk(KERN_ERR
 		       "Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
 		return PTR_ERR(fallback);
 	}

-	crypto_skcipher_set_flags(
+	crypto_sync_skcipher_set_flags(
 		fallback,
 		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
 	ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

 	if (ctx->fallback) {
-		crypto_free_skcipher(ctx->fallback);
+		crypto_free_sync_skcipher(ctx->fallback);
 		ctx->fallback = NULL;
 	}
 }
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_enable();
 	preempt_enable();

-	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
 }

@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

 	if (in_interrupt()) {
-		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-		skcipher_request_set_tfm(req, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 		ret = crypto_skcipher_encrypt(req);
@@ -33,7 +33,7 @@
 #include "aesp8-ppc.h"

 struct p8_aes_xts_ctx {
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct aes_key enc_key;
 	struct aes_key dec_key;
 	struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
 static int p8_aes_xts_init(struct crypto_tfm *tfm)
 {
 	const char *alg = crypto_tfm_alg_name(tfm);
-	struct crypto_skcipher *fallback;
+	struct crypto_sync_skcipher *fallback;
 	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

-	fallback = crypto_alloc_skcipher(alg, 0,
-			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	fallback = crypto_alloc_sync_skcipher(alg, 0,
+					      CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback)) {
 		printk(KERN_ERR
 		       "Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
 		return PTR_ERR(fallback);
 	}

-	crypto_skcipher_set_flags(
+	crypto_sync_skcipher_set_flags(
 		fallback,
 		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
 	ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
 	struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

 	if (ctx->fallback) {
-		crypto_free_skcipher(ctx->fallback);
+		crypto_free_sync_skcipher(ctx->fallback);
 		ctx->fallback = NULL;
 	}
 }
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_enable();
 	preempt_enable();

-	ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
 }

@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

 	if (in_interrupt()) {
-		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-		skcipher_request_set_tfm(req, ctx->fallback);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 		ret = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);