crypto-qti: qcedev: add support for hlos offload path
Add support for an HLOS offload data path in the qcedev driver, mainly
for DRM and HDCP usecases. The changes extend the current driver to:

- Register multiple pipes for the different offload usecases.
- Report timer expiry errors back to userspace.
- Support different IV CTR sizes based on userspace input.
- Add new IOCTLs for the encryption, decryption and copy offload
  usecases needed by DRM and HDCP.

Change-Id: Ie9b74c173d0afd7b8c863ed57a68ec6e74baa9b4
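For orientation, the sketch below condenses what the new offload path fills into struct qce_req (the fields added in this change) before calling qce_ablk_cipher_req(). It mirrors the new start_offload_cipher_req() further down and is illustrative only: the function name and the mode/length values are arbitrary, and callback/buffer wiring is omitted.

/* Illustrative sketch only: populate the offload fields added to
 * struct qce_req in this change for an HLOS-to-HLOS AES-CTR decrypt.
 * Offload usecases rely on pipe keys, so no key material is set here.
 */
static int example_offload_decrypt(void *qce_handle, struct qce_req *creq,
				   unsigned char *iv, unsigned int data_len)
{
	creq->op = QCE_REQ_ABLK_CIPHER_NO_KEY;
	creq->alg = CIPHER_ALG_AES;
	creq->mode = QCE_MODE_CTR;
	creq->dir = QCE_DECRYPT;
	creq->offload_op = QCE_OFFLOAD_HLOS_HLOS; /* new: selects the pipe pair */
	creq->iv = iv;
	creq->ivsize = 16;			/* AES block size */
	creq->iv_ctr_size = 64;			/* new: 64-bit CTR increment window */
	creq->cryptlen = data_len;
	creq->is_pattern_valid = false;		/* new: no pattern processing */
	creq->block_offset = 0;			/* new: no partial first block */
	creq->is_copy_op = false;		/* new: real crypto, not a copy */
	creq->flags = QCRYPTO_CTX_USE_PIPE_KEY;

	/* creq->qce_cb and creq->areq (completion callback/cookie) must also
	 * be set by a real caller; omitted here for brevity.
	 */
	return qce_ablk_cipher_req(qce_handle, creq);
}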
Kbuild
@@ -17,7 +17,7 @@ tz_log_dlkm-objs := tz_log/tz_log.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o
qce50_dlkm-objs := crypto-qti/qce50.o

obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcedev-mod_dlkm.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o
qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o

obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o
@@ -105,6 +105,14 @@ enum qce_req_op_enum {
QCE_REQ_LAST
};

/* Offload operation type */
enum qce_offload_op_enum {
QCE_OFFLOAD_HLOS_HLOS = 1,
QCE_OFFLOAD_HLOS_CPB = 2,
QCE_OFFLOAD_CPB_HLOS = 3,
QCE_OFFLOAD_OPER_LAST
};

/* Algorithms/features supported in CE HW engine */
struct ce_hw_support {
bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/
@@ -147,6 +155,7 @@ struct qce_sha_req {
unsigned int size; /* data length in bytes */
void *areq;
unsigned int flags;
int current_req_info;
};

struct qce_req {
@@ -168,10 +177,17 @@ struct qce_req {
unsigned int encklen; /* cipher key length */
unsigned char *iv; /* initialization vector */
unsigned int ivsize; /* initialization vector size*/
unsigned int iv_ctr_size; /* iv increment counter size*/
unsigned int cryptlen; /* data length */
unsigned int use_pmem; /* is source of data PMEM allocated? */
struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/
unsigned int flags;
enum qce_offload_op_enum offload_op; /* Offload usecase */
bool is_pattern_valid; /* Is pattern setting required */
unsigned int pattern_info; /* Pattern info for offload operation */
unsigned int block_offset; /* partial first block for AES CTR */
bool is_copy_op; /* copy buffers without crypto ops */
int current_req_info;
};

struct qce_pm_table {
@@ -192,5 +208,8 @@ int qce_disable_clk(void *handle);
void qce_get_driver_stats(void *handle);
void qce_clear_driver_stats(void *handle);
void qce_dump_req(void *handle);

void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2,
unsigned int *s3, unsigned int *s4,
unsigned int *s5, unsigned int *s6);
int qce_manage_timeout(void *handle, int req_info);
#endif /* __CRYPTO_MSM_QCE_H */
@@ -36,7 +36,7 @@
|
||||
#define CRYPTO_SMMU_IOVA_START 0x10000000
|
||||
#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
|
||||
|
||||
#define CRYPTO_CONFIG_RESET 0xE01EF
|
||||
#define CRYPTO_CONFIG_RESET 0xE001F
|
||||
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
|
||||
#define QCE_MAX_NUM_DSCR 0x200
|
||||
#define QCE_SECTOR_SIZE 0x200
|
||||
@@ -84,6 +84,9 @@ static LIST_HEAD(qce50_bam_list);
|
||||
|
||||
#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
|
||||
|
||||
#define AES_CTR_IV_CTR_SIZE 64
|
||||
#define EXPECTED_STATUS1_REG_VAL 0x2000006
|
||||
|
||||
enum qce_owner {
|
||||
QCE_OWNER_NONE = 0,
|
||||
QCE_OWNER_CLIENT = 1,
|
||||
@@ -156,6 +159,8 @@ struct qce_device {
|
||||
struct dma_iommu_mapping *smmu_mapping;
|
||||
bool enable_s1_smmu;
|
||||
bool no_clock_support;
|
||||
bool kernel_pipes_support;
|
||||
bool offload_pipes_support;
|
||||
};
|
||||
|
||||
static void print_notify_debug(struct sps_event_notify *notify);
|
||||
@@ -175,6 +180,112 @@ static uint32_t _std_init_vector_sha256[] = {
|
||||
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
|
||||
};
|
||||
|
||||
/*
|
||||
* Requests for offload operations do not require explicit dma operations
|
||||
* as they already have SMMU mapped source/destination buffers.
|
||||
*/
|
||||
static bool is_offload_op(int op)
|
||||
{
|
||||
return (op == QCE_OFFLOAD_HLOS_HLOS || op == QCE_OFFLOAD_HLOS_CPB ||
|
||||
op == QCE_OFFLOAD_CPB_HLOS);
|
||||
}
|
||||
|
||||
static uint32_t qce_get_config_be(struct qce_device *pce_dev,
|
||||
uint32_t pipe_pair)
|
||||
{
|
||||
uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
|
||||
|
||||
return (beats << CRYPTO_REQ_SIZE |
|
||||
BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
|
||||
BIT(CRYPTO_MASK_OP_DONE_INTR) | 0 << CRYPTO_HIGH_SPD_EN_N |
|
||||
pipe_pair << CRYPTO_PIPE_SET_SELECT);
|
||||
}
|
||||
|
||||
static void dump_status_regs(unsigned int s1, unsigned int s2, unsigned int s3,
unsigned int s4, unsigned int s5, unsigned int s6)
|
||||
{
|
||||
pr_err("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, s1);
|
||||
pr_err("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, s2);
|
||||
pr_err("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, s3);
|
||||
pr_err("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, s4);
|
||||
pr_err("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, s5);
|
||||
pr_err("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, s6);
|
||||
}
|
||||
|
||||
void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2,
|
||||
unsigned int *s3, unsigned int *s4,
|
||||
unsigned int *s5, unsigned int *s6)
|
||||
{
|
||||
struct qce_device *pce_dev = (struct qce_device *) handle;
|
||||
|
||||
*s1 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
|
||||
*s2 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS2_REG);
|
||||
*s3 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS3_REG);
|
||||
*s4 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS4_REG);
|
||||
*s5 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS5_REG);
|
||||
*s6 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS6_REG);
|
||||
|
||||
#ifdef QCE_DEBUG
|
||||
dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6);
|
||||
#else
|
||||
if (*s1 != EXPECTED_STATUS1_REG_VAL)
|
||||
dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
EXPORT_SYMBOL(qce_get_crypto_status);
|
||||
|
||||
static void qce_set_offload_config(struct qce_device *pce_dev,
|
||||
struct qce_req *creq)
|
||||
{
|
||||
uint32_t config_be = pce_dev->reg.crypto_cfg_be;
|
||||
|
||||
switch (creq->offload_op) {
|
||||
case QCE_OFFLOAD_HLOS_HLOS:
|
||||
config_be = qce_get_config_be(pce_dev,
|
||||
pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS]);
|
||||
break;
|
||||
case QCE_OFFLOAD_HLOS_CPB:
|
||||
config_be = qce_get_config_be(pce_dev,
|
||||
pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB]);
|
||||
break;
|
||||
case QCE_OFFLOAD_CPB_HLOS:
|
||||
config_be = qce_get_config_be(pce_dev,
|
||||
pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS]);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
pce_dev->reg.crypto_cfg_be = config_be;
|
||||
pce_dev->reg.crypto_cfg_le = (config_be |
|
||||
CRYPTO_LITTLE_ENDIAN_MASK);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
* IV counter mask is set based on the values sent through the offload ioctl
* calls. Currently for offload operations, it is 64 bits of mask for AES CTR,
* and 128 bits of mask for AES CBC.
*/
static void qce_set_iv_ctr_mask(struct qce_device *pce_dev,
struct qce_req *creq)
{
if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) {
pce_dev->reg.encr_cntr_mask_0 = 0x0;
pce_dev->reg.encr_cntr_mask_1 = 0x0;
pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
} else {
pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
}

return;
}
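As a quick reference for the function above, this is how the four 32-bit counter-mask words map onto the 128-bit AES IV for the two counter sizes the offload path programs; a sketch of the resulting register values only, not driver code.

/* Illustrative only: resulting counter-mask words, per qce_set_iv_ctr_mask().
 * mask_0 covers IV bits 127:96 ... mask_3 covers IV bits 31:0
 * (see the encr_cntr_mask_* comments added to the register-setting struct
 * later in this change).
 *
 *   iv_ctr_size = 64 (AES CTR):  mask_0 = 0x0        mask_1 = 0x0
 *                                mask_2 = 0xFFFFFFFF mask_3 = 0xFFFFFFFF
 *   otherwise (e.g. AES CBC):    all four words = 0xFFFFFFFF
 */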
|
||||
|
||||
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
|
||||
unsigned int len)
|
||||
{
|
||||
@@ -725,12 +836,21 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
uint32_t ivsize = creq->ivsize;
|
||||
int i;
|
||||
struct sps_command_element *pce = NULL;
|
||||
bool is_des_cipher = false;
|
||||
|
||||
if (creq->mode == QCE_MODE_XTS)
|
||||
key_size = creq->encklen/2;
|
||||
else
|
||||
key_size = creq->encklen;
|
||||
|
||||
qce_set_offload_config(pce_dev, creq);
|
||||
|
||||
pce = cmdlistinfo->crypto_cfg;
|
||||
pce->data = pce_dev->reg.crypto_cfg_be;
|
||||
|
||||
pce = cmdlistinfo->crypto_cfg_le;
|
||||
pce->data = pce_dev->reg.crypto_cfg_le;
|
||||
|
||||
pce = cmdlistinfo->go_proc;
|
||||
if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
|
||||
use_hw_key = true;
|
||||
@@ -739,7 +859,6 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
QCRYPTO_CTX_USE_PIPE_KEY)
|
||||
use_pipe_key = true;
|
||||
}
|
||||
pce = cmdlistinfo->go_proc;
|
||||
if (use_hw_key)
|
||||
pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
|
||||
pce_dev->phy_iobase);
|
||||
@@ -857,6 +976,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
pce++;
|
||||
pce->data = enckey32[1];
|
||||
}
|
||||
is_des_cipher = true;
|
||||
break;
|
||||
case CIPHER_ALG_3DES:
|
||||
if (creq->mode != QCE_MODE_ECB) {
|
||||
@@ -877,6 +997,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
for (i = 0; i < 6; i++, pce++)
|
||||
pce->data = enckey32[i];
|
||||
}
|
||||
is_des_cipher = true;
|
||||
break;
|
||||
case CIPHER_ALG_AES:
|
||||
default:
|
||||
@@ -971,6 +1092,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
encr_cfg |=
|
||||
((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
|
||||
}
|
||||
|
||||
if (use_hw_key)
|
||||
encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
|
||||
else
|
||||
@@ -979,10 +1101,14 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
|
||||
/* write encr seg size */
|
||||
pce = cmdlistinfo->encr_seg_size;
|
||||
if (creq->is_copy_op) {
|
||||
pce->data = 0;
|
||||
} else {
|
||||
if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
|
||||
pce->data = (creq->cryptlen + creq->authsize);
|
||||
else
|
||||
pce->data = creq->cryptlen;
|
||||
}
|
||||
|
||||
/* write encr seg start */
|
||||
pce = cmdlistinfo->encr_seg_start;
|
||||
@@ -992,6 +1118,41 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
|
||||
pce = cmdlistinfo->seg_size;
|
||||
pce->data = totallen_in;
|
||||
|
||||
if (is_offload_op(creq->offload_op)) {
|
||||
/* pattern info */
|
||||
pce = cmdlistinfo->pattern_info;
|
||||
if (creq->is_pattern_valid)
|
||||
pce->data = creq->pattern_info;
|
||||
|
||||
/* block offset */
|
||||
pce = cmdlistinfo->block_offset;
|
||||
pce->data = (creq->block_offset << 4) |
|
||||
(creq->block_offset ? 1: 0);
|
||||
|
||||
/* IV counter size */
|
||||
qce_set_iv_ctr_mask(pce_dev, creq);
|
||||
}
|
||||
|
||||
if (!is_des_cipher) {
|
||||
pce = cmdlistinfo->encr_mask_3;
|
||||
pce->data = pce_dev->reg.encr_cntr_mask_3;
|
||||
pce = cmdlistinfo->encr_mask_2;
|
||||
pce->data = pce_dev->reg.encr_cntr_mask_2;
|
||||
pce = cmdlistinfo->encr_mask_1;
|
||||
pce->data = pce_dev->reg.encr_cntr_mask_1;
|
||||
pce = cmdlistinfo->encr_mask_0;
|
||||
pce->data = pce_dev->reg.encr_cntr_mask_0;
|
||||
}
|
||||
|
||||
pce = cmdlistinfo->go_proc;
|
||||
pce->data = 0;
|
||||
if (is_offload_op(creq->offload_op))
|
||||
pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT));
|
||||
else
|
||||
pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) |
|
||||
(1 << CRYPTO_RESULTS_DUMP));
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1128,11 +1289,11 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
|
||||
|
||||
pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
|
||||
iovec = pce_sps_data->in_transfer.iovec;
|
||||
pr_info("==============================================\n");
|
||||
pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
|
||||
pr_info("==============================================\n");
|
||||
pr_err("==============================================\n");
|
||||
pr_err("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
|
||||
pr_err("==============================================\n");
|
||||
for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
|
||||
pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
|
||||
pr_err(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
|
||||
iovec->addr, iovec->size, iovec->flags);
|
||||
if (iovec->flags & cmd_flags) {
|
||||
struct sps_command_element *pced;
|
||||
@@ -1141,7 +1302,7 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
|
||||
(GET_VIRT_ADDR(iovec->addr));
|
||||
ents = iovec->size/(sizeof(struct sps_command_element));
|
||||
for (j = 0; j < ents; j++) {
|
||||
pr_info(" [%d] [0x%x] 0x%x\n", j,
|
||||
pr_err(" [%d] [0x%x] 0x%x\n", j,
|
||||
pced->addr, pced->data);
|
||||
pced++;
|
||||
}
|
||||
@@ -1149,9 +1310,9 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
|
||||
iovec++;
|
||||
}
|
||||
|
||||
pr_info("==============================================\n");
|
||||
pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
|
||||
pr_info("==============================================\n");
|
||||
pr_err("==============================================\n");
|
||||
pr_err("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
|
||||
pr_err("==============================================\n");
|
||||
iovec = pce_sps_data->out_transfer.iovec;
|
||||
for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
|
||||
pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
|
||||
@@ -1567,8 +1728,11 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
|
||||
/* clear status */
|
||||
QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
|
||||
|
||||
QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
|
||||
CRYPTO_CONFIG_REG));
|
||||
qce_set_offload_config(pce_dev, creq);
|
||||
QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be,
|
||||
(pce_dev->iobase + CRYPTO_CONFIG_REG));
|
||||
QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le,
|
||||
(pce_dev->iobase + CRYPTO_CONFIG_REG));
|
||||
/*
* Ensure the previous instructions (setting the CONFIG register)
* have completed before starting to set the other config registers
|
||||
@@ -1837,25 +2001,34 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
|
||||
pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
|
||||
}
|
||||
|
||||
/* write pattern */
|
||||
if (creq->is_pattern_valid)
|
||||
QCE_WRITE_REG(creq->pattern_info, pce_dev->iobase +
|
||||
CRYPTO_DATA_PATT_PROC_CFG_REG);
|
||||
|
||||
/* write block offset to CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG */
|
||||
QCE_WRITE_REG(((creq->block_offset << 4) |
|
||||
(creq->block_offset ? 1 : 0)),
|
||||
pce_dev->iobase + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG);
|
||||
|
||||
/* write encr seg start */
|
||||
QCE_WRITE_REG((coffset & 0xffff),
|
||||
pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
|
||||
|
||||
/* write encr counter mask */
|
||||
QCE_WRITE_REG(0xffffffff,
|
||||
qce_set_iv_ctr_mask(pce_dev, creq);
|
||||
QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_3,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
|
||||
QCE_WRITE_REG(0xffffffff,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
|
||||
QCE_WRITE_REG(0xffffffff,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
|
||||
QCE_WRITE_REG(0xffffffff,
|
||||
QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_2,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
|
||||
QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_1,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
|
||||
QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_0,
|
||||
pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
|
||||
|
||||
/* write seg size */
|
||||
QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
|
||||
|
||||
QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
|
||||
CRYPTO_CONFIG_REG));
|
||||
/* issue go to crypto */
|
||||
if (!use_hw_key) {
|
||||
QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -2042,11 +2215,12 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
|
||||
int rc = 0;
|
||||
struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
|
||||
[req_info].ce_sps;
|
||||
uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
|
||||
|
||||
if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
|
||||
return rc;
|
||||
|
||||
rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
|
||||
rc = sps_transfer_one(pce_dev->ce_bam_info.consumer[op].pipe,
|
||||
GET_PHYS_ADDR(
|
||||
pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist),
|
||||
0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
|
||||
@@ -2060,6 +2234,34 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
|
||||
static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
|
||||
bool is_complete);
|
||||
|
||||
int qce_manage_timeout(void *handle, int req_info)
|
||||
{
|
||||
int rc = 0;
|
||||
struct qce_device *pce_dev = (struct qce_device *) handle;
|
||||
struct skcipher_request *areq;
|
||||
struct ce_request_info *preq_info;
|
||||
qce_comp_func_ptr_t qce_callback;
|
||||
uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
|
||||
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
qce_callback = preq_info->qce_cb;
|
||||
areq = (struct skcipher_request *) preq_info->areq;
|
||||
|
||||
pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info, op);
|
||||
rc = _qce_unlock_other_pipes(pce_dev, req_info);
|
||||
if (rc)
|
||||
pr_err("%s: fail unlock other pipes, rc = %d", __func__, rc);
|
||||
qce_free_req_info(pce_dev, req_info, true);
|
||||
qce_callback(areq, NULL, NULL, 0);
|
||||
sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
|
||||
pce_dev->ce_bam_info.dest_pipe_index[op]);
|
||||
sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
|
||||
pce_dev->ce_bam_info.src_pipe_index[op]);
|
||||
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(qce_manage_timeout);
|
||||
|
||||
static int _aead_complete(struct qce_device *pce_dev, int req_info)
|
||||
{
|
||||
struct aead_request *areq;
|
||||
@@ -2260,13 +2462,16 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
qce_callback = preq_info->qce_cb;
|
||||
areq = (struct skcipher_request *) preq_info->areq;
|
||||
if (areq->src != areq->dst) {
|
||||
|
||||
if (!is_offload_op(preq_info->offload_op)) {
|
||||
if (areq->src != areq->dst)
|
||||
qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
|
||||
preq_info->dst_nents, DMA_FROM_DEVICE);
|
||||
}
|
||||
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
|
||||
qce_dma_unmap_sg(pce_dev->pdev, areq->src,
|
||||
preq_info->src_nents,
|
||||
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
if (_qce_unlock_other_pipes(pce_dev, req_info)) {
|
||||
qce_free_req_info(pce_dev, req_info, true);
|
||||
@@ -2276,12 +2481,16 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
|
||||
result_dump_status = be32_to_cpu(pce_sps_data->result->status);
|
||||
pce_sps_data->result->status = 0;
|
||||
|
||||
if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
|
||||
| (1 << CRYPTO_HSD_ERR))) {
|
||||
if (!is_offload_op(preq_info->offload_op)) {
|
||||
if (result_dump_status & ((1 << CRYPTO_SW_ERR) |
|
||||
(1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) {
|
||||
pr_err("ablk_cipher operation error. Status %x\n",
|
||||
result_dump_status);
|
||||
result_status = -ENXIO;
|
||||
} else if (pce_sps_data->consumer_status |
|
||||
}
|
||||
}
|
||||
|
||||
if (pce_sps_data->consumer_status |
|
||||
pce_sps_data->producer_status) {
|
||||
pr_err("ablk_cipher sps operation error. sps status %x %x\n",
|
||||
pce_sps_data->consumer_status,
|
||||
@@ -2579,6 +2788,7 @@ static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
|
||||
{
|
||||
int rc = 0;
|
||||
struct ce_sps_data *pce_sps_data;
|
||||
uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
|
||||
|
||||
pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
|
||||
pce_sps_data->out_transfer.user =
|
||||
@@ -2590,20 +2800,20 @@ static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
|
||||
_qce_dump_descr_fifos_dbg(pce_dev, req_info);
|
||||
|
||||
if (pce_sps_data->in_transfer.iovec_count) {
|
||||
rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
|
||||
rc = sps_transfer(pce_dev->ce_bam_info.consumer[op].pipe,
|
||||
&pce_sps_data->in_transfer);
|
||||
if (rc) {
|
||||
pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
|
||||
(uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
|
||||
pr_err("sps_xfr() fail (cons pipe=0x%lx) rc = %d\n",
|
||||
(uintptr_t)pce_dev->ce_bam_info.consumer[op].pipe,
|
||||
rc);
|
||||
goto ret;
|
||||
}
|
||||
}
|
||||
rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
|
||||
rc = sps_transfer(pce_dev->ce_bam_info.producer[op].pipe,
|
||||
&pce_sps_data->out_transfer);
|
||||
if (rc)
|
||||
pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
|
||||
(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
|
||||
(uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc);
|
||||
ret:
|
||||
if (rc)
|
||||
_qce_dump_descr_fifos(pce_dev, req_info);
|
||||
@@ -2625,6 +2835,7 @@ ret:
|
||||
*
|
||||
* @pce_dev - Pointer to qce_device structure
|
||||
* @ep - Pointer to sps endpoint data structure
|
||||
* @index - Points to crypto use case
|
||||
* @is_producer - 1 means Producer endpoint
|
||||
* 0 means Consumer endpoint
|
||||
*
|
||||
@@ -2633,6 +2844,7 @@ ret:
|
||||
*/
|
||||
static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
|
||||
struct qce_sps_ep_conn_data *ep,
|
||||
int index,
|
||||
bool is_producer)
|
||||
{
|
||||
int rc = 0;
|
||||
@@ -2686,12 +2898,13 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
|
||||
|
||||
/* Producer pipe index */
|
||||
sps_connect_info->src_pipe_index =
|
||||
pce_dev->ce_bam_info.src_pipe_index;
|
||||
pce_dev->ce_bam_info.src_pipe_index[index];
|
||||
/* Consumer pipe index */
|
||||
sps_connect_info->dest_pipe_index =
|
||||
pce_dev->ce_bam_info.dest_pipe_index;
|
||||
pce_dev->ce_bam_info.dest_pipe_index[index];
|
||||
/* Set pipe group */
|
||||
sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
|
||||
sps_connect_info->lock_group =
|
||||
pce_dev->ce_bam_info.pipe_pair_index[index];
|
||||
sps_connect_info->event_thresh = 0x10;
|
||||
/*
|
||||
* Max. no of scatter/gather buffers that can
|
||||
@@ -2941,7 +3154,7 @@ ret:
|
||||
*/
|
||||
static int qce_sps_init(struct qce_device *pce_dev)
|
||||
{
|
||||
int rc = 0;
|
||||
int rc = 0, i = 0;
|
||||
|
||||
rc = qce_sps_get_bam(pce_dev);
|
||||
if (rc)
|
||||
@@ -2949,14 +3162,20 @@ static int qce_sps_init(struct qce_device *pce_dev)
|
||||
pr_debug("BAM device registered. bam_handle=0x%lx\n",
|
||||
pce_dev->ce_bam_info.bam_handle);
|
||||
|
||||
for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
|
||||
if (i == 0 && !(pce_dev->kernel_pipes_support))
|
||||
continue;
|
||||
else if ((i > 0) && !(pce_dev->offload_pipes_support))
|
||||
break;
|
||||
rc = qce_sps_init_ep_conn(pce_dev,
|
||||
&pce_dev->ce_bam_info.producer, true);
|
||||
&pce_dev->ce_bam_info.producer[i], i, true);
|
||||
if (rc)
|
||||
goto sps_connect_producer_err;
|
||||
rc = qce_sps_init_ep_conn(pce_dev,
|
||||
&pce_dev->ce_bam_info.consumer, false);
|
||||
&pce_dev->ce_bam_info.consumer[i], i, false);
|
||||
if (rc)
|
||||
goto sps_connect_consumer_err;
|
||||
}
|
||||
|
||||
pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
|
||||
(unsigned long long)pce_dev->ce_bam_info.bam_mem,
|
||||
@@ -2964,7 +3183,7 @@ static int qce_sps_init(struct qce_device *pce_dev)
|
||||
return rc;
|
||||
|
||||
sps_connect_consumer_err:
|
||||
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
|
||||
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer[i]);
|
||||
sps_connect_producer_err:
|
||||
qce_sps_release_bam(pce_dev);
|
||||
return rc;
|
||||
@@ -3124,6 +3343,7 @@ static void _sps_producer_callback(struct sps_event_notify *notify)
|
||||
unsigned int req_info;
|
||||
struct ce_sps_data *pce_sps_data;
|
||||
struct ce_request_info *preq_info;
|
||||
uint16_t op;
|
||||
|
||||
print_notify_debug(notify);
|
||||
|
||||
@@ -3140,25 +3360,30 @@ static void _sps_producer_callback(struct sps_event_notify *notify)
|
||||
}
|
||||
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
op = pce_dev->ce_request_info[req_info].offload_op;
|
||||
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
|
||||
preq_info->xfer_type == QCE_XFER_AEAD) &&
|
||||
pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
|
||||
pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
|
||||
if (!is_offload_op(op)) {
|
||||
pce_sps_data->out_transfer.iovec_count = 0;
|
||||
_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
|
||||
_qce_sps_add_data(GET_PHYS_ADDR(
|
||||
pce_sps_data->result_dump),
|
||||
CRYPTO_RESULT_DUMP_SIZE,
|
||||
&pce_sps_data->out_transfer);
|
||||
_qce_set_flag(&pce_sps_data->out_transfer,
|
||||
SPS_IOVEC_FLAG_INT);
|
||||
rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
|
||||
rc = sps_transfer(
|
||||
pce_dev->ce_bam_info.producer[op].pipe,
|
||||
&pce_sps_data->out_transfer);
|
||||
if (rc) {
|
||||
pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
|
||||
(uintptr_t)pce_dev->ce_bam_info.producer.pipe,
|
||||
pr_err("sps_xfr fail (prod pipe=0x%lx) rc = %d\n",
|
||||
(uintptr_t)pce_dev->ce_bam_info.producer[op].pipe,
|
||||
rc);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -3179,8 +3404,18 @@ static void _sps_producer_callback(struct sps_event_notify *notify)
|
||||
*/
|
||||
static void qce_sps_exit(struct qce_device *pce_dev)
|
||||
{
|
||||
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
|
||||
qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
|
||||
int i = 0;
|
||||
|
||||
for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
|
||||
if (i == 0 && !(pce_dev->kernel_pipes_support))
|
||||
continue;
|
||||
else if ((i > 0) && !(pce_dev->offload_pipes_support))
|
||||
break;
|
||||
qce_sps_exit_ep_conn(pce_dev,
|
||||
&pce_dev->ce_bam_info.consumer[i]);
|
||||
qce_sps_exit_ep_conn(pce_dev,
|
||||
&pce_dev->ce_bam_info.producer[i]);
|
||||
}
|
||||
qce_sps_release_bam(pce_dev);
|
||||
}
|
||||
|
||||
@@ -3301,6 +3536,11 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
|
||||
/* clear status register */
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS2_REG, 0, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS3_REG, 0, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS4_REG, 0, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS5_REG, 0, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS6_REG, 0, NULL);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
|
||||
@@ -3314,15 +3554,20 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
|
||||
&pcl_info->encr_seg_start);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
|
||||
(uint32_t)0xffffffff, &pcl_info->encr_mask);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
|
||||
pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
|
||||
pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
|
||||
&pcl_info->auth_seg_cfg);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_DATA_PATT_PROC_CFG_REG, 0,
|
||||
&pcl_info->pattern_info);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr,
|
||||
CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG, 0,
|
||||
&pcl_info->block_offset);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
|
||||
&pcl_info->encr_key);
|
||||
for (i = 1; i < key_reg; i++)
|
||||
@@ -3359,7 +3604,7 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
0, &pcl_info->auth_seg_size);
|
||||
}
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -3473,7 +3718,7 @@ static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
}
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -3695,7 +3940,7 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
0, NULL);
|
||||
}
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -3908,7 +4153,7 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
|
||||
&pcl_info->auth_seg_start);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -3980,13 +4225,13 @@ static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
|
||||
&pcl_info->encr_seg_start);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
|
||||
(uint32_t)0xffffffff, &pcl_info->encr_mask);
|
||||
pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
|
||||
(uint32_t)0xffffffff, NULL);
|
||||
pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
|
||||
auth_cfg, &pcl_info->auth_seg_cfg);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
|
||||
@@ -4041,7 +4286,7 @@ static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
0, NULL);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -4127,7 +4372,7 @@ static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
|
||||
NULL);
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -4209,7 +4454,7 @@ static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
|
||||
pdev->reg.crypto_cfg_le, NULL);
|
||||
pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
|
||||
|
||||
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
|
||||
((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
|
||||
@@ -4396,13 +4641,9 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
|
||||
|
||||
static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
|
||||
{
|
||||
uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
|
||||
uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
|
||||
uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index[0];
|
||||
|
||||
pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
|
||||
BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
|
||||
BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
|
||||
(pipe_pair << CRYPTO_PIPE_SET_SELECT);
|
||||
pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair);
|
||||
|
||||
pce_dev->reg.crypto_cfg_le =
|
||||
(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
|
||||
@@ -4565,6 +4806,13 @@ static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
|
||||
pce_dev->reg.auth_cfg_snow3g =
|
||||
(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
|
||||
BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
|
||||
|
||||
/* Initialize IV counter mask values */
|
||||
pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
|
||||
pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
|
||||
pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
|
||||
pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4701,6 +4949,7 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
q_req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
|
||||
@@ -4880,15 +5129,22 @@ static int _qce_suspend(void *handle)
|
||||
{
|
||||
struct qce_device *pce_dev = (struct qce_device *)handle;
|
||||
struct sps_pipe *sps_pipe_info;
|
||||
int i = 0;
|
||||
|
||||
if (handle == NULL)
|
||||
return -ENODEV;
|
||||
|
||||
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
|
||||
for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
|
||||
if (i == 0 && !(pce_dev->kernel_pipes_support))
|
||||
continue;
|
||||
else if ((i > 0) && !(pce_dev->offload_pipes_support))
|
||||
break;
|
||||
sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
|
||||
sps_disconnect(sps_pipe_info);
|
||||
|
||||
sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
|
||||
sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
|
||||
sps_disconnect(sps_pipe_info);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -4898,32 +5154,41 @@ static int _qce_resume(void *handle)
|
||||
struct qce_device *pce_dev = (struct qce_device *)handle;
|
||||
struct sps_pipe *sps_pipe_info;
|
||||
struct sps_connect *sps_connect_info;
|
||||
int rc;
|
||||
int rc, i;
|
||||
|
||||
if (handle == NULL)
|
||||
return -ENODEV;
|
||||
|
||||
sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
|
||||
sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
|
||||
memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
|
||||
for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
|
||||
if (i == 0 && !(pce_dev->kernel_pipes_support))
|
||||
continue;
|
||||
else if ((i > 0) && !(pce_dev->offload_pipes_support))
|
||||
break;
|
||||
sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
|
||||
sps_connect_info = &pce_dev->ce_bam_info.consumer[i].connect;
|
||||
memset(sps_connect_info->desc.base, 0x00,
|
||||
sps_connect_info->desc.size);
|
||||
rc = sps_connect(sps_pipe_info, sps_connect_info);
|
||||
if (rc) {
|
||||
pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
|
||||
pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
|
||||
(uintptr_t)sps_pipe_info, rc);
|
||||
return rc;
|
||||
}
|
||||
sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
|
||||
sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
|
||||
memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
|
||||
sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
|
||||
sps_connect_info = &pce_dev->ce_bam_info.producer[i].connect;
|
||||
memset(sps_connect_info->desc.base, 0x00,
|
||||
sps_connect_info->desc.size);
|
||||
rc = sps_connect(sps_pipe_info, sps_connect_info);
|
||||
if (rc)
|
||||
pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
|
||||
pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
|
||||
(uintptr_t)sps_pipe_info, rc);
|
||||
|
||||
rc = sps_register_event(sps_pipe_info,
|
||||
&pce_dev->ce_bam_info.producer.event);
|
||||
&pce_dev->ce_bam_info.producer[i].event);
|
||||
if (rc)
|
||||
pr_err("Producer callback registration failed rc = %d\n", rc);
|
||||
pr_err("Producer cb registration failed rc = %d\n",
|
||||
rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -4951,6 +5216,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
q_req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
areq = (struct aead_request *) q_req->areq;
|
||||
@@ -5124,6 +5390,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
c_req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
|
||||
@@ -5133,12 +5400,16 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
/* cipher input */
|
||||
preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
|
||||
|
||||
qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
|
||||
if (!is_offload_op(c_req->offload_op))
|
||||
qce_dma_map_sg(pce_dev->pdev, areq->src,
|
||||
preq_info->src_nents,
|
||||
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
/* cipher output */
|
||||
if (areq->src != areq->dst) {
|
||||
preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen);
|
||||
if (!is_offload_op(c_req->offload_op))
|
||||
qce_dma_map_sg(pce_dev->pdev, areq->dst,
|
||||
preq_info->dst_nents, DMA_FROM_DEVICE);
|
||||
} else {
|
||||
@@ -5172,6 +5443,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
goto bad;
|
||||
|
||||
preq_info->mode = c_req->mode;
|
||||
preq_info->offload_op = c_req->offload_op;
|
||||
|
||||
/* setup for client callback, and issue command to BAM */
|
||||
preq_info->areq = areq;
|
||||
@@ -5185,7 +5457,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
if (pce_dev->support_cmd_dscr && cmdlistinfo)
|
||||
_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
|
||||
&pce_sps_data->in_transfer);
|
||||
if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
|
||||
if (_qce_sps_add_data(areq->src->dma_address, areq->cryptlen,
|
||||
&pce_sps_data->in_transfer))
|
||||
goto bad;
|
||||
_qce_set_flag(&pce_sps_data->in_transfer,
|
||||
@@ -5196,11 +5468,12 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
&pce_sps_data->cmdlistptr.unlock_all_pipes,
|
||||
&pce_sps_data->in_transfer);
|
||||
|
||||
if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->cryptlen,
|
||||
if (_qce_sps_add_data(areq->dst->dma_address, areq->cryptlen,
|
||||
&pce_sps_data->out_transfer))
|
||||
goto bad;
|
||||
if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) {
|
||||
pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
|
||||
if (!is_offload_op(c_req->offload_op))
|
||||
if (_qce_sps_add_data(
|
||||
GET_PHYS_ADDR(pce_sps_data->result_dump),
|
||||
CRYPTO_RESULT_DUMP_SIZE,
|
||||
@@ -5218,18 +5491,19 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
|
||||
|
||||
return 0;
|
||||
bad:
|
||||
if (areq->src != areq->dst) {
|
||||
if (preq_info->dst_nents) {
|
||||
if (!is_offload_op(c_req->offload_op)) {
|
||||
if (areq->src != areq->dst)
|
||||
if (preq_info->dst_nents)
|
||||
qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
|
||||
preq_info->dst_nents, DMA_FROM_DEVICE);
|
||||
}
|
||||
}
|
||||
if (preq_info->src_nents) {
|
||||
|
||||
if (preq_info->src_nents)
|
||||
qce_dma_unmap_sg(pce_dev->pdev, areq->src,
|
||||
preq_info->src_nents,
|
||||
(areq->src == areq->dst) ?
|
||||
DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
qce_free_req_info(pce_dev, req_info, false);
|
||||
return rc;
|
||||
}
|
||||
@@ -5257,6 +5531,7 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
sreq->current_req_info = req_info;
|
||||
areq = (struct ahash_request *)sreq->areq;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
@@ -5349,6 +5624,7 @@ int qce_f8_req(void *handle, struct qce_f8_req *req,
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
|
||||
@@ -5472,6 +5748,7 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
|
||||
@@ -5579,6 +5856,7 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
|
||||
req_info = qce_alloc_req_info(pce_dev);
|
||||
if (req_info < 0)
|
||||
return -EBUSY;
|
||||
req->current_req_info = req_info;
|
||||
preq_info = &pce_dev->ce_request_info[req_info];
|
||||
pce_sps_data = &preq_info->ce_sps;
|
||||
switch (req->algorithm) {
|
||||
@@ -5648,7 +5926,7 @@ static int __qce_get_device_tree_data(struct platform_device *pdev,
|
||||
struct qce_device *pce_dev)
|
||||
{
|
||||
struct resource *resource;
|
||||
int rc = 0;
|
||||
int rc = 0, i = 0;
|
||||
|
||||
pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
|
||||
"qcom,ce-hw-shared");
|
||||
@@ -5680,12 +5958,39 @@ static int __qce_get_device_tree_data(struct platform_device *pdev,
|
||||
pce_dev->request_bw_before_clk = of_property_read_bool(
|
||||
(&pdev->dev)->of_node, "qcom,request-bw-before-clk");
|
||||
|
||||
pce_dev->kernel_pipes_support = true;
|
||||
if (of_property_read_u32((&pdev->dev)->of_node,
|
||||
"qcom,bam-pipe-pair",
|
||||
&pce_dev->ce_bam_info.pipe_pair_index)) {
|
||||
pr_err("Fail to get bam pipe pair information.\n");
|
||||
&pce_dev->ce_bam_info.pipe_pair_index[0])) {
|
||||
pr_warn("Kernel pipes not supported.\n");
|
||||
//Unused pipe, just as failsafe.
|
||||
pce_dev->ce_bam_info.pipe_pair_index[0] = 2;
|
||||
pce_dev->kernel_pipes_support = false;
|
||||
}
|
||||
|
||||
if (of_property_read_bool((&pdev->dev)->of_node,
|
||||
"qcom,offload-ops-support")) {
|
||||
pce_dev->offload_pipes_support = true;
|
||||
if (of_property_read_u32((&pdev->dev)->of_node,
|
||||
"qcom,bam-pipe-offload-cpb-hlos",
|
||||
&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS])) {
|
||||
pr_err("Fail to get bam offload cpb-hlos pipe pair info.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (of_property_read_u32((&pdev->dev)->of_node,
|
||||
"qcom,bam-pipe-offload-hlos-hlos",
|
||||
&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS])) {
|
||||
pr_err("Fail to get bam offload hlos-hlos info.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (of_property_read_u32((&pdev->dev)->of_node,
|
||||
"qcom,bam-pipe-offload-hlos-cpb",
|
||||
&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB])) {
|
||||
pr_err("Fail to get bam offload hlos-cpb info\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (of_property_read_u32((&pdev->dev)->of_node,
|
||||
"qcom,ce-device",
|
||||
&pce_dev->ce_bam_info.ce_device)) {
|
||||
@@ -5717,10 +6022,13 @@ static int __qce_get_device_tree_data(struct platform_device *pdev,
|
||||
pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node,
|
||||
"qcom,no-clock-support");
|
||||
|
||||
pce_dev->ce_bam_info.dest_pipe_index =
|
||||
2 * pce_dev->ce_bam_info.pipe_pair_index;
|
||||
pce_dev->ce_bam_info.src_pipe_index =
|
||||
pce_dev->ce_bam_info.dest_pipe_index + 1;
|
||||
for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
|
||||
/* Source/destination pipes for all usecases */
|
||||
pce_dev->ce_bam_info.dest_pipe_index[i] =
|
||||
2 * pce_dev->ce_bam_info.pipe_pair_index[i];
|
||||
pce_dev->ce_bam_info.src_pipe_index[i] =
|
||||
pce_dev->ce_bam_info.dest_pipe_index[i] + 1;
|
||||
}
|
||||
|
||||
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
|
||||
"crypto-base");
|
||||
@@ -6055,9 +6363,9 @@ void *qce_open(struct platform_device *pdev, int *rc)
|
||||
|
||||
qce_init_ce_cfg_val(pce_dev);
|
||||
*rc = qce_sps_init(pce_dev);
|
||||
if (*rc == 0)
|
||||
if (*rc)
|
||||
goto err;
|
||||
qce_setup_ce_sps_data(pce_dev);
|
||||
*rc = 0;
|
||||
qce_disable_clk(pce_dev);
|
||||
setup_dummy_req(pce_dev);
|
||||
atomic_set(&pce_dev->no_of_queued_req, 0);
|
||||
|
@@ -71,6 +71,7 @@ struct qce_cmdlist_info {
|
||||
|
||||
unsigned long cmdlist;
|
||||
struct sps_command_element *crypto_cfg;
|
||||
struct sps_command_element *crypto_cfg_le;
|
||||
struct sps_command_element *encr_seg_cfg;
|
||||
struct sps_command_element *encr_seg_size;
|
||||
struct sps_command_element *encr_seg_start;
|
||||
@@ -78,8 +79,13 @@ struct qce_cmdlist_info {
|
||||
struct sps_command_element *encr_xts_key;
|
||||
struct sps_command_element *encr_cntr_iv;
|
||||
struct sps_command_element *encr_ccm_cntr_iv;
|
||||
struct sps_command_element *encr_mask;
|
||||
struct sps_command_element *encr_mask_0;
|
||||
struct sps_command_element *encr_mask_1;
|
||||
struct sps_command_element *encr_mask_2;
|
||||
struct sps_command_element *encr_mask_3;
|
||||
struct sps_command_element *encr_xts_du_size;
|
||||
struct sps_command_element *pattern_info;
|
||||
struct sps_command_element *block_offset;
|
||||
|
||||
struct sps_command_element *auth_seg_cfg;
|
||||
struct sps_command_element *auth_seg_size;
|
||||
@@ -170,6 +176,15 @@ struct qce_ce_cfg_reg_setting {
|
||||
uint32_t auth_cfg_aead_sha256_hmac;
|
||||
uint32_t auth_cfg_kasumi;
|
||||
uint32_t auth_cfg_snow3g;
|
||||
|
||||
/* iv0 - bits 127:96 - CRYPTO_CNTR_MASK_REG0*/
|
||||
uint32_t encr_cntr_mask_0;
|
||||
/* iv1 - bits 95:64 - CRYPTO_CNTR_MASK_REG1*/
|
||||
uint32_t encr_cntr_mask_1;
|
||||
/* iv2 - bits 63:32 - CRYPTO_CNTR_MASK_REG2*/
|
||||
uint32_t encr_cntr_mask_2;
|
||||
/* iv3 - bits 31:0 - CRYPTO_CNTR_MASK_REG*/
|
||||
uint32_t encr_cntr_mask_3;
|
||||
};
|
||||
|
||||
struct ce_bam_info {
|
||||
@@ -179,14 +194,14 @@ struct ce_bam_info {
|
||||
uint32_t ce_device;
|
||||
uint32_t ce_hw_instance;
|
||||
uint32_t bam_ee;
|
||||
unsigned int pipe_pair_index;
|
||||
unsigned int src_pipe_index;
|
||||
unsigned int dest_pipe_index;
|
||||
unsigned int pipe_pair_index[QCE_OFFLOAD_OPER_LAST];
|
||||
unsigned int src_pipe_index[QCE_OFFLOAD_OPER_LAST];
|
||||
unsigned int dest_pipe_index[QCE_OFFLOAD_OPER_LAST];
|
||||
unsigned long bam_handle;
|
||||
int ce_burst_size;
|
||||
uint32_t minor_version;
|
||||
struct qce_sps_ep_conn_data producer;
|
||||
struct qce_sps_ep_conn_data consumer;
|
||||
struct qce_sps_ep_conn_data producer[QCE_OFFLOAD_OPER_LAST];
|
||||
struct qce_sps_ep_conn_data consumer[QCE_OFFLOAD_OPER_LAST];
|
||||
};
|
||||
|
||||
/* SPS data structure with buffers, commandlists & commmand pointer lists */
|
||||
@@ -227,6 +242,7 @@ struct ce_request_info {
|
||||
dma_addr_t phy_ota_dst;
|
||||
unsigned int ota_size;
|
||||
unsigned int req_len;
|
||||
unsigned int offload_op;
|
||||
};
|
||||
|
||||
struct qce_driver_stats {
|
||||
|
@@ -34,8 +34,11 @@

#include <linux/compat.h>

#define CACHE_LINE_SIZE 32
#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)
/* Max wait time once a crypto request is done */
#define MAX_CRYPTO_WAIT_TIME 1500
|
||||
|
||||
static uint8_t _std_init_vector_sha1_uint8[] = {
|
||||
0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
|
||||
@@ -50,6 +53,13 @@ static uint8_t _std_init_vector_sha256_uint8[] = {
|
||||
0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
|
||||
};
|
||||
|
||||
#define QCEDEV_CTX_KEY_MASK 0x000000ff
|
||||
#define QCEDEV_CTX_USE_HW_KEY 0x00000001
|
||||
#define QCEDEV_CTX_USE_PIPE_KEY 0x00000002
|
||||
|
||||
#define QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK 0x000000FF
|
||||
#define QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK 0x00000003
|
||||
|
||||
static DEFINE_MUTEX(send_cmd_lock);
|
||||
static DEFINE_MUTEX(qcedev_sent_bw_req);
|
||||
static DEFINE_MUTEX(hash_access_lock);
|
||||
@@ -184,8 +194,12 @@ exit_unlock_mutex:
|
||||
|
||||
static int qcedev_open(struct inode *inode, struct file *file);
|
||||
static int qcedev_release(struct inode *inode, struct file *file);
|
||||
static int start_cipher_req(struct qcedev_control *podev);
|
||||
static int start_sha_req(struct qcedev_control *podev);
|
||||
static int start_cipher_req(struct qcedev_control *podev,
|
||||
int *current_req_info);
|
||||
static int start_offload_cipher_req(struct qcedev_control *podev,
|
||||
int *current_req_info);
|
||||
static int start_sha_req(struct qcedev_control *podev,
|
||||
int *current_req_info);
|
||||
|
||||
static const struct file_operations qcedev_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
@@ -283,6 +297,7 @@ static void req_done(unsigned long data)
|
||||
unsigned long flags = 0;
|
||||
struct qcedev_async_req *new_req = NULL;
|
||||
int ret = 0;
|
||||
int current_req_info = 0;
|
||||
|
||||
spin_lock_irqsave(&podev->lock, flags);
|
||||
areq = podev->active_command;
|
||||
@@ -296,9 +311,11 @@ again:
|
||||
podev->active_command = new_req;
|
||||
new_req->err = 0;
|
||||
if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
|
||||
ret = start_cipher_req(podev);
|
||||
ret = start_cipher_req(podev, ¤t_req_info);
|
||||
else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
|
||||
ret = start_offload_cipher_req(podev, ¤t_req_info);
|
||||
else
|
||||
ret = start_sha_req(podev);
|
||||
ret = start_sha_req(podev, ¤t_req_info);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&podev->lock, flags);
|
||||
@@ -361,7 +378,8 @@ void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
|
||||
tasklet_schedule(&podev->done_tasklet);
|
||||
};
|
||||
|
||||
static int start_cipher_req(struct qcedev_control *podev)
|
||||
static int start_cipher_req(struct qcedev_control *podev,
|
||||
int *current_req_info)
|
||||
{
|
||||
struct qcedev_async_req *qcedev_areq;
|
||||
struct qce_req creq;
|
||||
@@ -454,16 +472,125 @@ static int start_cipher_req(struct qcedev_control *podev)
|
||||
creq.qce_cb = qcedev_cipher_req_cb;
|
||||
creq.areq = (void *)&qcedev_areq->cipher_req;
|
||||
creq.flags = 0;
|
||||
creq.offload_op = 0;
|
||||
ret = qce_ablk_cipher_req(podev->qce, &creq);
|
||||
*current_req_info = creq.current_req_info;
|
||||
unsupported:
|
||||
if (ret)
|
||||
qcedev_areq->err = -ENXIO;
|
||||
else
|
||||
qcedev_areq->err = 0;
|
||||
qcedev_areq->err = ret ? -ENXIO : 0;
|
||||
|
||||
return ret;
|
||||
};
|
||||
|
||||
static int start_sha_req(struct qcedev_control *podev)
|
||||
void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
|
||||
unsigned char *iv, int ret)
|
||||
{
|
||||
struct qcedev_cipher_req *areq;
|
||||
struct qcedev_handle *handle;
|
||||
struct qcedev_control *podev;
|
||||
struct qcedev_async_req *qcedev_areq;
|
||||
|
||||
areq = (struct qcedev_cipher_req *) cookie;
|
||||
handle = (struct qcedev_handle *) areq->cookie;
|
||||
podev = handle->cntl;
|
||||
qcedev_areq = podev->active_command;
|
||||
|
||||
if (iv)
|
||||
memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
|
||||
qcedev_areq->offload_cipher_op_req.ivlen);
|
||||
|
||||
tasklet_schedule(&podev->done_tasklet);
|
||||
}
|
||||
|
||||
static int start_offload_cipher_req(struct qcedev_control *podev,
|
||||
int *current_req_info)
|
||||
{
|
||||
struct qcedev_async_req *qcedev_areq;
|
||||
struct qce_req creq;
|
||||
u8 patt_sz = 0, proc_data_sz = 0;
|
||||
int ret = 0;
|
||||
|
||||
/* Start the command on the podev->active_command */
|
||||
qcedev_areq = podev->active_command;
|
||||
qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
|
||||
|
||||
switch (qcedev_areq->offload_cipher_op_req.alg) {
|
||||
case QCEDEV_ALG_AES:
|
||||
creq.alg = CIPHER_ALG_AES;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (qcedev_areq->offload_cipher_op_req.mode) {
|
||||
case QCEDEV_AES_MODE_CBC:
|
||||
creq.mode = QCE_MODE_CBC;
|
||||
break;
|
||||
case QCEDEV_AES_MODE_CTR:
|
||||
creq.mode = QCE_MODE_CTR;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
|
||||
creq.dir = QCE_ENCRYPT;
|
||||
} else {
|
||||
switch (qcedev_areq->offload_cipher_op_req.op) {
|
||||
case QCEDEV_OFFLOAD_HLOS_HLOS:
|
||||
case QCEDEV_OFFLOAD_HLOS_CPB:
|
||||
creq.dir = QCE_DECRYPT;
|
||||
break;
|
||||
case QCEDEV_OFFLOAD_CPB_HLOS:
|
||||
creq.dir = QCE_ENCRYPT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
|
||||
creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
|
||||
creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
|
||||
|
||||
creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;
|
||||
|
||||
/* OFFLOAD use cases use PIPE keys so no need to set keys */
|
||||
creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
|
||||
creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
|
||||
creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
|
||||
if (qcedev_areq->offload_cipher_op_req.is_copy_op)
|
||||
creq.is_copy_op = true;
|
||||
|
||||
creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;
|
||||
|
||||
creq.qce_cb = qcedev_offload_cipher_req_cb;
|
||||
creq.areq = (void *)&qcedev_areq->cipher_req;
|
||||
|
||||
patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
|
||||
proc_data_sz =
|
||||
qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
|
||||
creq.is_pattern_valid =
|
||||
qcedev_areq->offload_cipher_op_req.is_pattern_valid;
|
||||
if (creq.is_pattern_valid) {
|
||||
creq.pattern_info = 0x1;
|
||||
if (patt_sz)
|
||||
creq.pattern_info |= (patt_sz - 1) << 4;
|
||||
if (proc_data_sz)
|
||||
creq.pattern_info |= (proc_data_sz - 1) << 8;
|
||||
creq.pattern_info |=
|
||||
qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
|
||||
}
|
||||
creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;
|
||||
ret = qce_ablk_cipher_req(podev->qce, &creq);
|
||||
|
||||
*current_req_info = creq.current_req_info;
|
||||
qcedev_areq->err = ret ? -ENXIO : 0;
|
||||
|
||||
return ret;
|
||||
}
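To make the pattern_info packing in start_offload_cipher_req() above concrete, here is a worked example with arbitrary sample values; the bit positions come from the shifts in that function, while the exact field widths are hardware-defined and not shown in this hunk.

/* Illustrative only: pattern_info packing as done above.
 *   bit 0           : pattern-processing enable
 *   bits from bit 4 : pattern size - 1
 *   bits from bit 8 : processed-data size - 1
 *   bits from bit 12: pattern offset
 *
 * Example: patt_sz = 2, proc_data_sz = 8, patt_offset = 3
 *   0x1 | ((2 - 1) << 4) | ((8 - 1) << 8) | (3 << 12) = 0x3711
 */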
|
||||
|
||||
static int start_sha_req(struct qcedev_control *podev,
|
||||
int *current_req_info)
|
||||
{
|
||||
struct qcedev_async_req *qcedev_areq;
|
||||
struct qce_sha_req sreq;
|
||||
@@ -532,13 +659,37 @@ static int start_sha_req(struct qcedev_control *podev)
|
||||
|
||||
ret = qce_process_sha_req(podev->qce, &sreq);
|
||||
|
||||
if (ret)
|
||||
qcedev_areq->err = -ENXIO;
|
||||
else
|
||||
qcedev_areq->err = 0;
|
||||
*current_req_info = sreq.current_req_info;
|
||||
qcedev_areq->err = ret ? -ENXIO : 0;

	return ret;
};

static void qcedev_check_crypto_status(
			struct qcedev_async_req *qcedev_areq, void *handle,
			bool print_err)
{
	unsigned int s1, s2, s3, s4, s5, s6;

	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
	qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6);

	if (print_err) {
		pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
	}
	if ((s6 & QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK) ||
		(s3 & QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK)) {
		pr_info("%s: crypto timer expired\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_TIMER_ERROR;
	}

	return;
}

static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
@@ -546,18 +697,27 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;
	int current_req_info = 0;
	int wait = 0;
	bool print_sts = false;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
		return 0;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
			ret = start_cipher_req(podev, &current_req_info);
		else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
			ret = start_offload_cipher_req(podev, &current_req_info);
		else
			ret = start_sha_req(podev);
			ret = start_sha_req(podev, &current_req_info);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}
@@ -568,11 +728,30 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);
		wait = wait_for_completion_timeout(&qcedev_areq->complete,
				msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));

	if (!wait) {
		/*
		 * This means wait timed out, and the callback routine was not
		 * exercised. The callback sequence does some housekeeping which
		 * would be missed here, hence having a call to qce here to do
		 * that.
		 */
		pr_err("%s: wait timed out, req info = %d\n", __func__,
				current_req_info);
		print_sts = true;
		qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
		qce_manage_timeout(podev->qce, current_req_info);
	}

	if (ret)
		qcedev_areq->err = -EIO;

	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
		return 0;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
@@ -591,6 +770,8 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
		default:
			break;
		}
	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
		//Do nothing
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
@@ -1417,6 +1598,72 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,

}

static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
						__func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	return err;
}

static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
				struct qcedev_control *podev)
{
@@ -1663,6 +1910,138 @@ sha_error:
	return -EINVAL;
}

static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
				struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
			(req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
				__func__, req->encklen);
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}

static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
				struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}

	if ((req->alg != QCEDEV_ALG_AES) ||
			(req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
				(uint32_t)req->alg);
		goto error;
	}

	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;

	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
					__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Int overflow on total src len\n",
						__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
				req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
					__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
					__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
				__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
					__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
					__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
				__func__, total, req->data_len);
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}

long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
@@ -1727,6 +2106,33 @@ long qcedev_ioctl(struct file *file,
		}
		break;

	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				&qcedev_areq->offload_cipher_op_req,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;
@@ -1944,8 +2350,8 @@ long qcedev_ioctl(struct file *file,
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx\n",
				__func__, vaddr);
pr_info("%s: info: vaddr = %llx\n, fd = %d",
|
||||
__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,

@@ -16,12 +16,13 @@
#include "qce.h"
#include "qcedev_smmu.h"

#define CACHE_LINE_SIZE 32
#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER = 2,
	QCEDEV_CRYPTO_OPER_LAST
};

@@ -56,6 +57,7 @@ struct qcedev_async_req {
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
		struct qcedev_offload_cipher_op_req offload_cipher_op_req;
	};

	union {

@@ -26,6 +26,11 @@

#define CRYPTO_STATUS_REG 0x1A100
#define CRYPTO_STATUS2_REG 0x1A104
#define CRYPTO_STATUS3_REG 0x1A11C
#define CRYPTO_STATUS4_REG 0x1A124
#define CRYPTO_STATUS5_REG 0x1A128
#define CRYPTO_STATUS6_REG 0x1A13C

#define CRYPTO_ENGINES_AVAIL 0x1A108
#define CRYPTO_FIFO_SIZES_REG 0x1A10C

@@ -37,6 +42,8 @@
#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
#define CRYPTO_ENCR_SEG_START_REG 0x1A208
#define CRYPTO_DATA_PATT_PROC_CFG_REG 0x1A500
#define CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG 0x1A504

#define CRYPTO_ENCR_KEY0_REG 0x1D000
#define CRYPTO_ENCR_KEY1_REG 0x1D004

@@ -41,6 +41,31 @@ enum qcedev_oper_enum {
	QCEDEV_OPER_LAST
};

/**
 *qcedev_offload_oper_enum: Offload operation types (uses pipe keys)
 * @QCEDEV_OFFLOAD_HLOS_HLOS: Non-secure to non-secure (e.g. audio dec).
 * @QCEDEV_OFFLOAD_HLOS_CPB: Non-secure to secure (e.g. video dec).
 * @QCEDEV_OFFLOAD_CPB_HLOS: Secure to non-secure (e.g. hdcp video enc).
 */
enum qcedev_offload_oper_enum {
	QCEDEV_OFFLOAD_HLOS_HLOS = 1,
	QCEDEV_OFFLOAD_HLOS_CPB = 2,
	QCEDEV_OFFLOAD_CPB_HLOS = 3,
	QCEDEV_OFFLOAD_OPER_LAST
};

/**
 *qcedev_offload_err_enum: Offload error conditions
 * @QCEDEV_OFFLOAD_NO_ERROR: Successful crypto operation.
 * @QCEDEV_OFFLOAD_GENERIC_ERROR: Generic error in crypto status.
 * @QCEDEV_OFFLOAD_TIMER_ERROR: Pipe key timer errors in crypto status.
 */
enum qcedev_offload_err_enum {
	QCEDEV_OFFLOAD_NO_ERROR = 0,
	QCEDEV_OFFLOAD_GENERIC_ERROR = 1,
	QCEDEV_OFFLOAD_TIMER_ERROR = 2
};

/**
 *qcedev_oper_enum: Cipher algorithm types
 * @QCEDEV_ALG_DES: DES
@@ -223,6 +248,72 @@ struct qcedev_sha_op_req {
	enum qcedev_sha_alg_enum alg;
};

/**
 * struct pattern_info - Holds pattern information for pattern-based
 * decryption/encryption for AES ECB, counter, and CBC modes.
 * @patt_sz (IN): Total number of blocks.
 * @proc_data_sz (IN): Number of blocks to be processed.
 * @patt_offset (IN): Start of the segment.
 */
struct pattern_info {
	__u8 patt_sz;
	__u8 proc_data_sz;
	__u8 patt_offset;
};
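
For reference, here is a minimal sketch of how these three fields end up in the single pattern word handed down to the QCE layer, mirroring the packing done in start_offload_cipher_req() earlier in this patch. The helper name is illustrative and not part of the driver; the reading of bit 0 as the enable bit and of the "- 1" as 0-based hardware programming is an inference from that code.

#include <stdint.h>

/*
 * Illustrative only: mirrors the packing in start_offload_cipher_req().
 * Bit 0 appears to enable pattern processing, patt_sz and proc_data_sz
 * are programmed 0-based (hence the "- 1"), and patt_offset sits at bit 12.
 */
static inline uint32_t pack_pattern_info(uint8_t patt_sz,
					 uint8_t proc_data_sz,
					 uint8_t patt_offset)
{
	uint32_t val = 0x1;		/* pattern processing enable */

	if (patt_sz)
		val |= (uint32_t)(patt_sz - 1) << 4;
	if (proc_data_sz)
		val |= (uint32_t)(proc_data_sz - 1) << 8;
	val |= (uint32_t)patt_offset << 12;

	return val;
}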

/**
 * struct qcedev_offload_cipher_op_req - Holds the offload request information
 * @vbuf (IN/OUT): Stores source and destination buffer information.
 *                 Refer to struct qcedev_vbuf_info.
 * @entries (IN): Number of entries to be processed as part of the request.
 * @data_len (IN): Total length of input/src and output/dst in bytes.
 * @in_place_op (IN): Indicates whether the operation is in place, where
 *                    source == destination.
 * @encklen (IN): Length of the encryption key (16 or 32 bytes; AES-192 is
 *                not supported for the offload path).
 * @iv (IN/OUT): Initialisation vector data.
 *               This is updated by the driver, incremented by the
 *               number of blocks encrypted/decrypted.
 * @ivlen (IN): Length of the IV.
 * @iv_ctr_size (IN): IV counter increment mask size.
 *                    The driver sets the mask value based on this size.
 * @byteoffset (IN): Offset in the cipher block (applicable to, and to be set
 *                   for, AES-128 CTR mode only).
 * @block_offset (IN): Offset in the block that needs a skip of encrypt/
 *                     decrypt.
 * @is_pattern_valid (IN): Indicates that the request contains a valid pattern.
 * @pattern_info (IN): The pattern to be used for the offload request.
 * @is_copy_op (IN): Offload operations sometimes require a copy between
 *                   secure and non-secure buffers without any encrypt/
 *                   decrypt operations.
 * @alg (IN): Type of ciphering algorithm: AES/DES/3DES.
 * @mode (IN): Mode to use with the AES algorithm: ECB/CBC/CTR.
 *             Applicable when using the AES algorithm only.
 * @op (IN): Type of operation.
 *           Refer to qcedev_offload_oper_enum.
 * @err (OUT): Error in crypto status.
 *             Refer to qcedev_offload_err_enum.
 */
struct qcedev_offload_cipher_op_req {
	struct qcedev_vbuf_info vbuf;
	__u32 entries;
	__u32 data_len;
	__u32 in_place_op;
	__u32 encklen;
	__u8 iv[QCEDEV_MAX_IV_SIZE];
	__u32 ivlen;
	__u32 iv_ctr_size;
	__u32 byteoffset;
	__u8 block_offset;
	__u8 is_pattern_valid;
	__u8 is_copy_op;
	struct pattern_info pattern_info;
	enum qcedev_cipher_alg_enum alg;
	enum qcedev_cipher_mode_enum mode;
	enum qcedev_offload_oper_enum op;
	enum qcedev_offload_err_enum err;
};
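
To make the flow concrete, the sketch below shows how userspace might drive the new QCEDEV_IOCTL_OFFLOAD_OP_REQ on an already-open qcedev file descriptor. It is an assumption-laden illustration, not part of the patch: the helper name is made up, the buffers are presumed to have been mapped through QCEDEV_IOCTL_MAP_BUF_REQ beforehand, and the include path depends on where the UAPI header above is installed. Note that a pipe-key timer expiry is reported back through req.err rather than as an ioctl failure.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/qcedev.h>	/* include path is an assumption */

/* Hypothetical helper: AES-128-CBC decrypt of one mapped buffer, pipe keys. */
static int offload_decrypt(int qce_fd, __u8 *src, __u8 *dst, __u32 len,
			   const __u8 iv[16])
{
	struct qcedev_offload_cipher_op_req req;

	memset(&req, 0, sizeof(req));
	req.vbuf.src[0].vaddr = src;	/* addresses previously mapped via */
	req.vbuf.src[0].len = len;	/* QCEDEV_IOCTL_MAP_BUF_REQ        */
	req.vbuf.dst[0].vaddr = dst;
	req.vbuf.dst[0].len = len;
	req.entries = 1;
	req.data_len = len;
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OFFLOAD_HLOS_HLOS;
	req.encklen = 16;		/* AES-128; the key itself is a pipe key */
	memcpy(req.iv, iv, 16);
	req.ivlen = 16;

	if (ioctl(qce_fd, QCEDEV_IOCTL_OFFLOAD_OP_REQ, &req) < 0)
		return -1;

	/* Timer expiry is reported through req.err, not the ioctl return. */
	if (req.err == QCEDEV_OFFLOAD_TIMER_ERROR) {
		fprintf(stderr, "pipe key timer expired, keys must be reprogrammed\n");
		return -1;
	}

	return req.err == QCEDEV_OFFLOAD_NO_ERROR ? 0 : -1;
}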

/**
 * struct qfips_verify_t - Holds data for FIPS Integrity test
 * @kernel_size (IN): Size of kernel Image
@@ -286,4 +377,6 @@ struct file;
	_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
#define QCEDEV_IOCTL_UNMAP_BUF_REQ \
	_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
#define QCEDEV_IOCTL_OFFLOAD_OP_REQ \
	_IOWR(QCEDEV_IOC_MAGIC, 12, struct qcedev_offload_cipher_op_req)
#endif /* _QCEDEV__H */

@@ -55,6 +55,7 @@ struct qce_f8_req {
	__u8 ckey[OTA_KEY_SIZE];
	enum qce_ota_dir_enum direction;
	enum qce_ota_algo_enum algorithm;
	int current_req_info;
};

/**
@@ -202,6 +203,7 @@ struct qce_f9_req {
	enum qce_ota_dir_enum direction;
	__u8 ikey[OTA_KEY_SIZE];
	enum qce_ota_algo_enum algorithm;
	int current_req_info;
};

#define QCOTA_IOC_MAGIC 0x85