crypto-qti: qcedev: add support for hlos offload path

Add support for an HLOS offload data path in the qcedev driver,
mainly to support DRM and HDCP use cases.

The changes extend the current driver to:
- Register multiple pipes for the different offload use cases.
- Report timer expiry errors back to userspace.
- Support different IV CTR sizes based on userspace input.
- Add new IOCTLs for the encryption, decryption and copy offload
  use cases of DRM and HDCP.

Change-Id: Ie9b74c173d0afd7b8c863ed57a68ec6e74baa9b4
commit 31f097f988
parent 177899dde3
Author: Gaurav Kashyap
Date:   2022-03-01 10:29:43 -08:00

9 files changed, 1014 insertions(+), 161 deletions(-)

Kbuild:

@@ -17,7 +17,7 @@ tz_log_dlkm-objs := tz_log/tz_log.o
 obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o
 qce50_dlkm-objs := crypto-qti/qce50.o
-obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcedev-mod_dlkm.o
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o
 qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o
 obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o
crypto-qti/qce.h:

@@ -105,6 +105,14 @@ enum qce_req_op_enum {
 	QCE_REQ_LAST
 };
 
+/* Offload operation type */
+enum qce_offload_op_enum {
+	QCE_OFFLOAD_HLOS_HLOS = 1,
+	QCE_OFFLOAD_HLOS_CPB = 2,
+	QCE_OFFLOAD_CPB_HLOS = 3,
+	QCE_OFFLOAD_OPER_LAST
+};
+
 /* Algorithms/features supported in CE HW engine */
 struct ce_hw_support {
 	bool sha1_hmac_20; /* Supports 20 bytes of HMAC key */
@@ -147,6 +155,7 @@ struct qce_sha_req {
 	unsigned int size; /* data length in bytes */
 	void *areq;
 	unsigned int flags;
+	int current_req_info;
 };
 
 struct qce_req {
@@ -168,10 +177,17 @@ struct qce_req {
 	unsigned int encklen; /* cipher key length */
 	unsigned char *iv; /* initialization vector */
 	unsigned int ivsize; /* initialization vector size */
+	unsigned int iv_ctr_size; /* iv increment counter size */
 	unsigned int cryptlen; /* data length */
 	unsigned int use_pmem; /* is source of data PMEM allocated? */
 	struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure */
 	unsigned int flags;
+	enum qce_offload_op_enum offload_op; /* Offload usecase */
+	bool is_pattern_valid; /* Is pattern setting required */
+	unsigned int pattern_info; /* Pattern info for offload operation */
+	unsigned int block_offset; /* partial first block for AES CTR */
+	bool is_copy_op; /* copy buffers without crypto ops */
+	int current_req_info;
 };
 
 struct qce_pm_table {
@@ -192,5 +208,8 @@ int qce_disable_clk(void *handle);
 void qce_get_driver_stats(void *handle);
 void qce_clear_driver_stats(void *handle);
 void qce_dump_req(void *handle);
+void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2,
+			   unsigned int *s3, unsigned int *s4,
+			   unsigned int *s5, unsigned int *s6);
+int qce_manage_timeout(void *handle, int req_info);
 
 #endif /* __CRYPTO_MSM_QCE_H */

crypto-qti/qce50.c: diff suppressed (too large to display here)

crypto-qti/qce50.h:

@@ -71,6 +71,7 @@ struct qce_cmdlist_info {
 	unsigned long cmdlist;
 	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *crypto_cfg_le;
 	struct sps_command_element *encr_seg_cfg;
 	struct sps_command_element *encr_seg_size;
 	struct sps_command_element *encr_seg_start;
@@ -78,8 +79,13 @@ struct qce_cmdlist_info {
 	struct sps_command_element *encr_xts_key;
 	struct sps_command_element *encr_cntr_iv;
 	struct sps_command_element *encr_ccm_cntr_iv;
-	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_mask_0;
+	struct sps_command_element *encr_mask_1;
+	struct sps_command_element *encr_mask_2;
+	struct sps_command_element *encr_mask_3;
 	struct sps_command_element *encr_xts_du_size;
+	struct sps_command_element *pattern_info;
+	struct sps_command_element *block_offset;
 
 	struct sps_command_element *auth_seg_cfg;
 	struct sps_command_element *auth_seg_size;
@@ -170,6 +176,15 @@ struct qce_ce_cfg_reg_setting {
 	uint32_t auth_cfg_aead_sha256_hmac;
 	uint32_t auth_cfg_kasumi;
 	uint32_t auth_cfg_snow3g;
+
+	/* iv0 - bits 127:96 - CRYPTO_CNTR_MASK_REG0 */
+	uint32_t encr_cntr_mask_0;
+	/* iv1 - bits 95:64 - CRYPTO_CNTR_MASK_REG1 */
+	uint32_t encr_cntr_mask_1;
+	/* iv2 - bits 63:32 - CRYPTO_CNTR_MASK_REG2 */
+	uint32_t encr_cntr_mask_2;
+	/* iv3 - bits 31:0 - CRYPTO_CNTR_MASK_REG */
+	uint32_t encr_cntr_mask_3;
 };
 
 struct ce_bam_info {
@@ -179,14 +194,14 @@ struct ce_bam_info {
 	uint32_t ce_device;
 	uint32_t ce_hw_instance;
 	uint32_t bam_ee;
-	unsigned int pipe_pair_index;
-	unsigned int src_pipe_index;
-	unsigned int dest_pipe_index;
+	unsigned int pipe_pair_index[QCE_OFFLOAD_OPER_LAST];
+	unsigned int src_pipe_index[QCE_OFFLOAD_OPER_LAST];
+	unsigned int dest_pipe_index[QCE_OFFLOAD_OPER_LAST];
 	unsigned long bam_handle;
 	int ce_burst_size;
 	uint32_t minor_version;
-	struct qce_sps_ep_conn_data producer;
-	struct qce_sps_ep_conn_data consumer;
+	struct qce_sps_ep_conn_data producer[QCE_OFFLOAD_OPER_LAST];
+	struct qce_sps_ep_conn_data consumer[QCE_OFFLOAD_OPER_LAST];
 };
 
 /* SPS data structure with buffers, commandlists & command pointer lists */
@@ -227,6 +242,7 @@ struct ce_request_info {
 	dma_addr_t phy_ota_dst;
 	unsigned int ota_size;
 	unsigned int req_len;
+	unsigned int offload_op;
 };
 
 struct qce_driver_stats {
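The split of encr_mask into encr_mask_0..3 (and the matching encr_cntr_mask_* defaults above) is what lets a request's iv_ctr_size pick how much of the 128-bit CTR IV actually increments. Below is a minimal sketch of how the four mask values could be derived; the qce_set_ctr_mask() helper name and the 32/64/128-bit cases are assumptions for illustration, since the real logic lives in the suppressed qce50.c diff:

	/* Sketch only: derive CNTR_MASK register values from iv_ctr_size. */
	static void qce_set_ctr_mask(struct qce_ce_cfg_reg_setting *reg,
				     unsigned int iv_ctr_size)
	{
		reg->encr_cntr_mask_0 = 0;
		reg->encr_cntr_mask_1 = 0;
		reg->encr_cntr_mask_2 = 0;
		reg->encr_cntr_mask_3 = 0xFFFFFFFF;	/* iv3, bits 31:0, always counts */

		if (iv_ctr_size >= 64)			/* extend into iv2, bits 63:32 */
			reg->encr_cntr_mask_2 = 0xFFFFFFFF;
		if (iv_ctr_size >= 128) {		/* full 128-bit counter */
			reg->encr_cntr_mask_1 = 0xFFFFFFFF;
			reg->encr_cntr_mask_0 = 0xFFFFFFFF;
		}
	}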

crypto-qti/qcedev.c:

@@ -34,8 +34,11 @@
 #include <linux/compat.h>
 
-#define CACHE_LINE_SIZE 32
+#define CACHE_LINE_SIZE 64
 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)
+/* Max wait time once a crypto request is done */
+#define MAX_CRYPTO_WAIT_TIME 1500
 
 static uint8_t _std_init_vector_sha1_uint8[] = {
 	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
@@ -50,6 +53,13 @@ static uint8_t _std_init_vector_sha256_uint8[] = {
 	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
 };
 
+#define QCEDEV_CTX_KEY_MASK				0x000000ff
+#define QCEDEV_CTX_USE_HW_KEY				0x00000001
+#define QCEDEV_CTX_USE_PIPE_KEY				0x00000002
+
+#define QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK	0x000000FF
+#define QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK	0x00000003
+
 static DEFINE_MUTEX(send_cmd_lock);
 static DEFINE_MUTEX(qcedev_sent_bw_req);
 static DEFINE_MUTEX(hash_access_lock);
@@ -184,8 +194,12 @@ exit_unlock_mutex:
 static int qcedev_open(struct inode *inode, struct file *file);
 static int qcedev_release(struct inode *inode, struct file *file);
-static int start_cipher_req(struct qcedev_control *podev);
-static int start_sha_req(struct qcedev_control *podev);
+static int start_cipher_req(struct qcedev_control *podev,
+			    int *current_req_info);
+static int start_offload_cipher_req(struct qcedev_control *podev,
+				    int *current_req_info);
+static int start_sha_req(struct qcedev_control *podev,
+			 int *current_req_info);
 
 static const struct file_operations qcedev_fops = {
 	.owner = THIS_MODULE,
@@ -283,6 +297,7 @@ static void req_done(unsigned long data)
 	unsigned long flags = 0;
 	struct qcedev_async_req *new_req = NULL;
 	int ret = 0;
+	int current_req_info = 0;
 
 	spin_lock_irqsave(&podev->lock, flags);
 	areq = podev->active_command;
@@ -296,9 +311,11 @@ again:
 		podev->active_command = new_req;
 		new_req->err = 0;
 		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
-			ret = start_cipher_req(podev);
+			ret = start_cipher_req(podev, &current_req_info);
+		else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
+			ret = start_offload_cipher_req(podev, &current_req_info);
 		else
-			ret = start_sha_req(podev);
+			ret = start_sha_req(podev, &current_req_info);
 	}
 
 	spin_unlock_irqrestore(&podev->lock, flags);
@@ -361,7 +378,8 @@ void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
 	tasklet_schedule(&podev->done_tasklet);
 };
 
-static int start_cipher_req(struct qcedev_control *podev)
+static int start_cipher_req(struct qcedev_control *podev,
+			    int *current_req_info)
 {
 	struct qcedev_async_req *qcedev_areq;
 	struct qce_req creq;
@@ -454,16 +472,125 @@ static int start_cipher_req(struct qcedev_control *podev)
 	creq.qce_cb = qcedev_cipher_req_cb;
 	creq.areq = (void *)&qcedev_areq->cipher_req;
 	creq.flags = 0;
+	creq.offload_op = 0;
 	ret = qce_ablk_cipher_req(podev->qce, &creq);
+	*current_req_info = creq.current_req_info;
 unsupported:
-	if (ret)
-		qcedev_areq->err = -ENXIO;
-	else
-		qcedev_areq->err = 0;
+	qcedev_areq->err = ret ? -ENXIO : 0;
 	return ret;
 };
 
-static int start_sha_req(struct qcedev_control *podev)
+void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
+				  unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	qcedev_areq = podev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
+		       qcedev_areq->offload_cipher_op_req.ivlen);
+
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_offload_cipher_req(struct qcedev_control *podev,
+				    int *current_req_info)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	u8 patt_sz = 0, proc_data_sz = 0;
+	int ret = 0;
+
+	/* Start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+
+	switch (qcedev_areq->offload_cipher_op_req.alg) {
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->offload_cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		switch (qcedev_areq->offload_cipher_op_req.op) {
+		case QCEDEV_OFFLOAD_HLOS_HLOS:
+		case QCEDEV_OFFLOAD_HLOS_CPB:
+			creq.dir = QCE_DECRYPT;
+			break;
+		case QCEDEV_OFFLOAD_CPB_HLOS:
+			creq.dir = QCE_ENCRYPT;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
+	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
+
+	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;
+
+	/* OFFLOAD use cases use PIPE keys so no need to set keys */
+	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
+	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
+	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
+		creq.is_copy_op = true;
+
+	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;
+
+	creq.qce_cb = qcedev_offload_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+
+	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
+	proc_data_sz =
+		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
+	creq.is_pattern_valid =
+		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
+	if (creq.is_pattern_valid) {
+		creq.pattern_info = 0x1;
+		if (patt_sz)
+			creq.pattern_info |= (patt_sz - 1) << 4;
+		if (proc_data_sz)
+			creq.pattern_info |= (proc_data_sz - 1) << 8;
+		creq.pattern_info |=
+			qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
+	}
+	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;
+
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+
+	*current_req_info = creq.current_req_info;
+	qcedev_areq->err = ret ? -ENXIO : 0;
+
+	return ret;
+}
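Reading the packing in start_offload_cipher_req() above: bit 0 enables pattern processing, bits 7:4 hold (patt_sz - 1), bits 11:8 hold (proc_data_sz - 1), and patt_offset lands at bit 12. As a worked example, a CENC-style "encrypt 1 block in every 10" pattern starting at the segment start (patt_sz = 10, proc_data_sz = 1, patt_offset = 0) gives pattern_info = 0x1 | (9 << 4) | (0 << 8) | (0 << 12) = 0x91.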
+
+static int start_sha_req(struct qcedev_control *podev,
+			 int *current_req_info)
 {
 	struct qcedev_async_req *qcedev_areq;
 	struct qce_sha_req sreq;
@@ -532,13 +659,37 @@ static int start_sha_req(struct qcedev_control *podev)
 	ret = qce_process_sha_req(podev->qce, &sreq);
 
-	if (ret)
-		qcedev_areq->err = -ENXIO;
-	else
-		qcedev_areq->err = 0;
+	*current_req_info = sreq.current_req_info;
+	qcedev_areq->err = ret ? -ENXIO : 0;
+
 	return ret;
 };
 
+static void qcedev_check_crypto_status(
+			struct qcedev_async_req *qcedev_areq, void *handle,
+			bool print_err)
+{
+	unsigned int s1, s2, s3, s4, s5, s6;
+
+	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
+
+	qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6);
+	if (print_err) {
+		pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
+		       s1, s2, s3, s4, s5, s6);
+	}
+	if ((s6 & QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK) ||
+	    (s3 & QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK)) {
+		pr_info("%s: crypto timer expired\n", __func__);
+		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
+			s1, s2, s3, s4, s5, s6);
+		qcedev_areq->offload_cipher_op_req.err =
+			QCEDEV_OFFLOAD_TIMER_ERROR;
+	}
+}
+
 static int submit_req(struct qcedev_async_req *qcedev_areq,
 		      struct qcedev_handle *handle)
 {
@@ -546,18 +697,27 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
 	unsigned long flags = 0;
 	int ret = 0;
 	struct qcedev_stat *pstat;
+	int current_req_info = 0;
+	int wait = 0;
+	bool print_sts = false;
 
 	qcedev_areq->err = 0;
 	podev = handle->cntl;
 
+	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
+	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
+		return 0;
+
 	spin_lock_irqsave(&podev->lock, flags);
 
 	if (podev->active_command == NULL) {
 		podev->active_command = qcedev_areq;
 		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
-			ret = start_cipher_req(podev);
+			ret = start_cipher_req(podev, &current_req_info);
+		else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
+			ret = start_offload_cipher_req(podev, &current_req_info);
 		else
-			ret = start_sha_req(podev);
+			ret = start_sha_req(podev, &current_req_info);
 	} else {
 		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
 	}
@@ -568,11 +728,30 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
 	spin_unlock_irqrestore(&podev->lock, flags);
 
 	if (ret == 0)
-		wait_for_completion(&qcedev_areq->complete);
+		wait = wait_for_completion_timeout(&qcedev_areq->complete,
+				msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));
+
+	if (!wait) {
+		/*
+		 * The wait timed out and the callback routine was not
+		 * exercised. The callback sequence does some housekeeping
+		 * that would be missed here, hence the call into qce to do
+		 * it.
+		 */
+		pr_err("%s: wait timed out, req info = %d\n", __func__,
+		       current_req_info);
+		print_sts = true;
+		qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
+		qce_manage_timeout(podev->qce, current_req_info);
+	}
 
 	if (ret)
 		qcedev_areq->err = -EIO;
 
+	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
+	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
+		return 0;
+
 	pstat = &_qcedev_stat;
 	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
 		switch (qcedev_areq->cipher_op_req.op) {
@@ -591,6 +770,8 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
 		default:
 			break;
 		}
+	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
+		/* Do nothing */
 	} else {
 		if (qcedev_areq->err)
 			pstat->qcedev_sha_fail++;
@@ -1417,6 +1598,72 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
 }
 
+static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
+					   struct qcedev_handle *handle)
+{
+	int i = 0;
+	int err = 0;
+	size_t byteoffset = 0;
+	size_t transfer_data_len = 0;
+	size_t pending_data_len = 0;
+	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
+	uint8_t *user_src = NULL;
+	uint8_t *user_dst = NULL;
+	struct scatterlist sg_src;
+	struct scatterlist sg_dst;
+
+	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->offload_cipher_op_req.byteoffset;
+
+	/*
+	 * areq has two components:
+	 * a) Request that comes from userspace i.e. offload_cipher_op_req
+	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
+	 * skcipher has sglist pointers src and dest that would carry
+	 * data to/from CE.
+	 */
+	areq->cipher_req.creq.src = &sg_src;
+	areq->cipher_req.creq.dst = &sg_dst;
+	sg_init_table(&sg_src, 1);
+	sg_init_table(&sg_dst, 1);
+
+	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
+		transfer_data_len = 0;
+		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
+		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
+		user_src += byteoffset;
+
+		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
+		user_dst += byteoffset;
+
+		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;
+
+		while (pending_data_len) {
+			transfer_data_len = min(max_data_xfer,
+						pending_data_len);
+			sg_src.dma_address = (dma_addr_t)user_src;
+			sg_dst.dma_address = (dma_addr_t)user_dst;
+			areq->cipher_req.creq.cryptlen = transfer_data_len;
+			sg_src.length = transfer_data_len;
+			sg_dst.length = transfer_data_len;
+
+			err = submit_req(areq, handle);
+			if (err) {
+				pr_err("%s: Error processing req, err = %d\n",
+				       __func__, err);
+				goto exit;
+			}
+			/* update data len to be processed */
+			pending_data_len -= transfer_data_len;
+			user_src += transfer_data_len;
+			user_dst += transfer_data_len;
+		}
+	}
+exit:
+	return err;
+}
+
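On the chunking above: MAX_CEHW_REQ_TRANSFER_SIZE is 128*32*1024 bytes = 4 MiB, so each vbuf entry is fed to the CE hardware in at most 4 MiB pieces; a 10 MiB entry, for example, becomes three back-to-back submit_req() calls (4 + 4 + 2 MiB). The IV is carried across pieces because creq.iv points into offload_cipher_op_req.iv, which qcedev_offload_cipher_req_cb() updates after each completion.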
 static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
 				   struct qcedev_control *podev)
 {
@@ -1663,6 +1910,138 @@ sha_error:
 	return -EINVAL;
 }
 
+static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
+					   struct qcedev_control *podev)
+{
+	if (req->encklen == 0)
+		return -EINVAL;
+
+	/* AES-192 is not a valid option for OFFLOAD use cases */
+	if ((req->encklen != QCEDEV_AES_KEY_128) &&
+	    (req->encklen != QCEDEV_AES_KEY_256)) {
+		pr_err("%s: unsupported key size %d\n",
+		       __func__, req->encklen);
+		goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
+static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
+					      struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	int i = 0;
+
+	if ((req->entries == 0) || (req->data_len == 0) ||
+	    (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+
+	if ((req->alg != QCEDEV_ALG_AES) ||
+	    (req->mode > QCEDEV_AES_MODE_CTR)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+		       (uint32_t)req->alg);
+		goto error;
+	}
+
+	if (qcedev_check_offload_cipher_key(req, podev))
+		goto error;
+
+	if (req->block_offset >= AES_CE_BLOCK_SIZE)
+		goto error;
+
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+			       __func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s: Int overflow on total src len\n",
+				       __func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+		       __func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__,
+		       req->encklen);
+		goto error;
+	}
+
+	/* Check that the sum of all dst lengths equals data_len */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+			       __func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Int overflow on total req dst vbuf len\n",
+			       __func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+		       __func__, i, total, req->data_len);
+		goto error;
+	}
+
+	/* Check that the sum of all src lengths equals data_len */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+			       __func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Int overflow on total req src vbuf len\n",
+			       __func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+		       __func__, total, req->data_len);
+		goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
 long qcedev_ioctl(struct file *file,
 		  unsigned int cmd, unsigned long arg)
 {
@@ -1727,6 +2106,33 @@ long qcedev_ioctl(struct file *file,
 		}
 		break;
 
+	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
+		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
+				   (void __user *)arg,
+				   sizeof(struct qcedev_offload_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
+		if (qcedev_check_offload_cipher_params(
+				&qcedev_areq->offload_cipher_op_req, podev)) {
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+
+		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
+		if (err)
+			goto exit_free_qcedev_areq;
+
+		if (copy_to_user((void __user *)arg,
+				 &qcedev_areq->offload_cipher_op_req,
+				 sizeof(struct qcedev_offload_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		break;
+
 	case QCEDEV_IOCTL_SHA_INIT_REQ:
 	{
 		struct scatterlist sg_src;
@@ -1944,8 +2350,8 @@ long qcedev_ioctl(struct file *file,
 			goto exit_free_qcedev_areq;
 		}
 		map_buf.buf_vaddr[i] = vaddr;
-		pr_info("%s: info: vaddr = %llx\n",
-			__func__, vaddr);
+		pr_info("%s: info: vaddr = %llx, fd = %d\n",
+			__func__, vaddr, map_buf.fd[i]);
 	}
 
 	if (copy_to_user((void __user *)arg, &map_buf,

crypto-qti/qcedevi.h:

@@ -16,12 +16,13 @@
 #include "qce.h"
 #include "qcedev_smmu.h"
 
-#define CACHE_LINE_SIZE 32
+#define CACHE_LINE_SIZE 64
 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
 
 enum qcedev_crypto_oper_type {
 	QCEDEV_CRYPTO_OPER_CIPHER = 0,
 	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER = 2,
 	QCEDEV_CRYPTO_OPER_LAST
 };
@@ -56,6 +57,7 @@ struct qcedev_async_req {
 	union {
 		struct qcedev_cipher_op_req cipher_op_req;
 		struct qcedev_sha_op_req sha_op_req;
+		struct qcedev_offload_cipher_op_req offload_cipher_op_req;
 	};
 
 	union {

crypto-qti/qcryptohw_50.h:

@@ -26,6 +26,11 @@
 #define CRYPTO_STATUS_REG			0x1A100
 #define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_STATUS3_REG			0x1A11C
+#define CRYPTO_STATUS4_REG			0x1A124
+#define CRYPTO_STATUS5_REG			0x1A128
+#define CRYPTO_STATUS6_REG			0x1A13C
 #define CRYPTO_ENGINES_AVAIL			0x1A108
 #define CRYPTO_FIFO_SIZES_REG			0x1A10C
@@ -37,6 +42,8 @@
 #define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
 #define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
 #define CRYPTO_ENCR_SEG_START_REG		0x1A208
+#define CRYPTO_DATA_PATT_PROC_CFG_REG		0x1A500
+#define CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG	0x1A504
 
 #define CRYPTO_ENCR_KEY0_REG			0x1D000
 #define CRYPTO_ENCR_KEY1_REG			0x1D004

include/uapi/linux/qcedev.h:

@@ -41,6 +41,31 @@ enum qcedev_oper_enum {
 	QCEDEV_OPER_LAST
 };
 
+/**
+ * qcedev_offload_oper_enum: Offload operation types (uses pipe keys)
+ * @QCEDEV_OFFLOAD_HLOS_HLOS: Non-secure to non-secure (e.g. audio dec).
+ * @QCEDEV_OFFLOAD_HLOS_CPB: Non-secure to secure (e.g. video dec).
+ * @QCEDEV_OFFLOAD_CPB_HLOS: Secure to non-secure (e.g. hdcp video enc).
+ */
+enum qcedev_offload_oper_enum {
+	QCEDEV_OFFLOAD_HLOS_HLOS = 1,
+	QCEDEV_OFFLOAD_HLOS_CPB = 2,
+	QCEDEV_OFFLOAD_CPB_HLOS = 3,
+	QCEDEV_OFFLOAD_OPER_LAST
+};
+
+/**
+ * qcedev_offload_err_enum: Offload error conditions
+ * @QCEDEV_OFFLOAD_NO_ERROR: Successful crypto operation.
+ * @QCEDEV_OFFLOAD_GENERIC_ERROR: Generic error in crypto status.
+ * @QCEDEV_OFFLOAD_TIMER_ERROR: Pipe key timer errors in crypto status.
+ */
+enum qcedev_offload_err_enum {
+	QCEDEV_OFFLOAD_NO_ERROR = 0,
+	QCEDEV_OFFLOAD_GENERIC_ERROR = 1,
+	QCEDEV_OFFLOAD_TIMER_ERROR = 2
+};
+
 /**
  * qcedev_oper_enum: Cipher algorithm types
  * @QCEDEV_ALG_DES: DES
@@ -223,6 +248,72 @@ struct qcedev_sha_op_req {
 	enum qcedev_sha_alg_enum alg;
 };
 
+/**
+ * struct pattern_info - Holds pattern information for pattern-based
+ * decryption/encryption for AES ECB, counter, and CBC modes.
+ * @patt_sz (IN): Total number of blocks.
+ * @proc_data_sz (IN): Number of blocks to be processed.
+ * @patt_offset (IN): Start of the segment.
+ */
+struct pattern_info {
+	__u8 patt_sz;
+	__u8 proc_data_sz;
+	__u8 patt_offset;
+};
+
+/**
+ * struct qcedev_offload_cipher_op_req - Holds the offload request information
+ * @vbuf (IN/OUT): Stores source and destination buffer information.
+ *                 Refer to struct qcedev_vbuf_info.
+ * @entries (IN): Number of entries to be processed as part of the request.
+ * @data_len (IN): Total length of input/src and output/dst, in bytes.
+ * @in_place_op (IN): Indicates whether the operation is in-place, where
+ *                    source == destination.
+ * @encklen (IN): Length of the encryption key (set to 128 bits/16
+ *                bytes in the driver).
+ * @iv (IN/OUT): Initialisation vector data.
+ *               This is updated by the driver, incremented by the
+ *               number of blocks encrypted/decrypted.
+ * @ivlen (IN): Length of the IV.
+ * @iv_ctr_size (IN): IV counter increment mask size.
+ *                    The driver sets the mask value based on this size.
+ * @byteoffset (IN): Offset in the cipher block (applicable to, and to be
+ *                   set for, AES-128 CTR mode only).
+ * @block_offset (IN): Offset in the block that needs a skip of encrypt/
+ *                     decrypt.
+ * @is_pattern_valid (IN): Indicates whether the request contains a valid
+ *                         pattern.
+ * @pattern_info (IN): The pattern to be used for the offload request.
+ * @is_copy_op (IN): Offload operations sometimes require a copy between
+ *                   secure and non-secure buffers without any encrypt/
+ *                   decrypt operations.
+ * @alg (IN): Type of ciphering algorithm: AES/DES/3DES.
+ * @mode (IN): Mode to use with the AES algorithm: ECB/CBC/CTR.
+ *             Applicable when using the AES algorithm only.
+ * @op (IN): Type of operation.
+ *           Refer to qcedev_offload_oper_enum.
+ * @err (OUT): Error in crypto status.
+ *             Refer to qcedev_offload_err_enum.
+ */
+struct qcedev_offload_cipher_op_req {
+	struct qcedev_vbuf_info vbuf;
+	__u32 entries;
+	__u32 data_len;
+	__u32 in_place_op;
+	__u32 encklen;
+	__u8 iv[QCEDEV_MAX_IV_SIZE];
+	__u32 ivlen;
+	__u32 iv_ctr_size;
+	__u32 byteoffset;
+	__u8 block_offset;
+	__u8 is_pattern_valid;
+	__u8 is_copy_op;
+	struct pattern_info pattern_info;
+	enum qcedev_cipher_alg_enum alg;
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_offload_oper_enum op;
+	enum qcedev_offload_err_enum err;
+};
+
 /**
  * struct qfips_verify_t - Holds data for FIPS Integrity test
  * @kernel_size (IN): Size of kernel Image
@@ -286,4 +377,6 @@ struct file;
 	_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
 #define QCEDEV_IOCTL_UNMAP_BUF_REQ \
 	_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
+#define QCEDEV_IOCTL_OFFLOAD_OP_REQ \
+	_IOWR(QCEDEV_IOC_MAGIC, 12, struct qcedev_offload_cipher_op_req)
 
 #endif /* _QCEDEV__H */
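To tie the new UAPI together, here is a hypothetical userspace sketch (not part of this patch) of one HLOS-to-HLOS AES-256-CTR decrypt through QCEDEV_IOCTL_OFFLOAD_OP_REQ. The offload_decrypt() name, the header install path, the prior QCEDEV_IOCTL_MAP_BUF_REQ mapping of src/dst, and the 32-bit counter size are all assumptions for illustration:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/qcedev.h>	/* assumed install path for this header */

	/* Hypothetical sketch: one HLOS-to-HLOS AES-256-CTR decrypt. */
	static int offload_decrypt(int fd, unsigned char *src, unsigned char *dst,
				   unsigned int len, unsigned char iv[16])
	{
		struct qcedev_offload_cipher_op_req req;

		memset(&req, 0, sizeof(req));
		req.vbuf.src[0].vaddr = src;	/* assumed already mapped via */
		req.vbuf.src[0].len = len;	/* QCEDEV_IOCTL_MAP_BUF_REQ   */
		req.vbuf.dst[0].vaddr = dst;
		req.vbuf.dst[0].len = len;
		req.entries = 1;
		req.data_len = len;
		req.encklen = 32;		/* AES-256; real key is a pipe key */
		memcpy(req.iv, iv, 16);
		req.ivlen = 16;
		req.iv_ctr_size = 32;		/* 32-bit counter increment mask */
		req.alg = QCEDEV_ALG_AES;
		req.mode = QCEDEV_AES_MODE_CTR;
		req.op = QCEDEV_OFFLOAD_HLOS_HLOS;

		if (ioctl(fd, QCEDEV_IOCTL_OFFLOAD_OP_REQ, &req) < 0)
			return -1;

		/* A zero return can still carry a pipe key timer expiry. */
		return (req.err == QCEDEV_OFFLOAD_NO_ERROR) ? 0 : -1;
	}

Checking req.err on success matters because submit_req() deliberately returns 0 after a timer expiry, so the error reaches userspace only through the request structure copied back by the ioctl.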

include/uapi/linux/qcota.h:

@@ -55,6 +55,7 @@ struct qce_f8_req {
 	__u8 ckey[OTA_KEY_SIZE];
 	enum qce_ota_dir_enum direction;
 	enum qce_ota_algo_enum algorithm;
+	int current_req_info;
 };
 
 /**
@@ -202,6 +203,7 @@ struct qce_f9_req {
 	enum qce_ota_dir_enum direction;
 	__u8 ikey[OTA_KEY_SIZE];
 	enum qce_ota_algo_enum algorithm;
+	int current_req_info;
 };
 
 #define QCOTA_IOC_MAGIC	0x85