Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - Check for the right CPU feature bit in sm4-ce on arm64.

 - Fix scatterwalk WARN_ON in aes-gcm-ce on arm64.

 - Fix unaligned fault in aesni on x86.

 - Fix potential NULL pointer dereference on exit in chtls.

 - Fix DMA mapping direction for RSA in caam.

 - Fix error path return value for xts setkey in caam.

 - Fix address endianness when DMA unmapping in caam.

 - Fix sleep-in-atomic in vmx.

 - Fix command corruption when queue is full in cavium/nitrox.

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
  crypto: vmx - Fix sleep-in-atomic bugs
  crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
  crypto: aesni - Use unaligned loads from gcm_context_data
  crypto: chtls - fix null dereference chtls_free_uld()
  crypto: arm64/sm4-ce - check for the right CPU feature bit
  crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
  crypto: caam/qi - fix error path in xts setkey
  crypto: caam/jr - fix descriptor DMA unmapping
Linus Torvalds
2018-08-29 13:38:39 -07:00
13 changed files with 144 additions and 106 deletions

View file

@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
         int ret = 0;
 
         if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-                crypto_ablkcipher_set_flags(ablkcipher,
-                                            CRYPTO_TFM_RES_BAD_KEY_LEN);
                 dev_err(jrdev, "key size mismatch\n");
-                return -EINVAL;
+                goto badkey;
         }
 
         ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
         return ret;
 badkey:
         crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-        return 0;
+        return -EINVAL;
 }
 
 /*
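The hunk above routes the key-length check through the same badkey label used by the rest of the function and makes that label return -EINVAL; previously it returned 0, so a caller could treat a rejected key as successfully set. A minimal sketch of the resulting control flow (illustrative names, not the exact driver code):

    /* Sketch of the consolidated setkey error path; illustrative only. */
    static int xts_setkey_sketch(struct crypto_ablkcipher *tfm,
                                 const u8 *key, unsigned int keylen)
    {
            if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE)
                    goto badkey;            /* single exit for every key error */

            /* ... program the shared descriptors with the new key ... */
            return 0;

    badkey:
            crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
            return -EINVAL;                 /* was: return 0, which hid the failure */
    }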

View file

@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
         dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
         dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
         dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
         dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
         dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
         dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+        dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                 goto unmap_p;
         }
 
-        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                 dev_err(dev, "Unable to map RSA tmp1 memory\n");
                 goto unmap_q;
         }
 
-        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                 dev_err(dev, "Unable to map RSA tmp2 memory\n");
                 goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
         return 0;
 
 unmap_tmp1:
-        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
         dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
                 goto unmap_dq;
         }
 
-        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+        pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                 dev_err(dev, "Unable to map RSA tmp1 memory\n");
                 goto unmap_qinv;
         }
 
-        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+        pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                 dev_err(dev, "Unable to map RSA tmp2 memory\n");
                 goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
         return 0;
 
 unmap_tmp1:
-        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+        dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
         dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
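In RSA forms 2 and 3 the tmp1 and tmp2 buffers are scratch space that the CAAM accelerator writes back into, so mapping them DMA_TO_DEVICE was incorrect; the hunks above switch both the mapping and the matching unmap to DMA_BIDIRECTIONAL. A hedged sketch of the general rule (generic device code, not the caam driver itself):

    /* Illustrative sketch: a buffer the device updates must be mapped and
     * unmapped DMA_BIDIRECTIONAL, and map/unmap directions must match.
     */
    static int map_scratch_sketch(struct device *dev, void *tmp, size_t len)
    {
            dma_addr_t tmp_dma;

            tmp_dma = dma_map_single(dev, tmp, len, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, tmp_dma))
                    return -ENOMEM;

            /* ... hardware reads and rewrites the buffer here ... */

            dma_unmap_single(dev, tmp_dma, len, DMA_BIDIRECTIONAL);
            return 0;
    }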

View file

@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
                 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
                 /* Unmap just-run descriptor so we can post-process */
-                dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+                dma_unmap_single(dev,
+                                 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                                  jrp->entinfo[sw_idx].desc_size,
                                  DMA_TO_DEVICE);
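The output ring entry is written by the CAAM block in its own byte order, so the descriptor address read from outring[hw_idx].desc must go through caam_dma_to_cpu() before being handed to dma_unmap_single(); on a little-endian CPU talking to a big-endian CAAM, unmapping the raw value would use a byte-swapped DMA address. A conceptual sketch of that conversion, assuming a 64-bit big-endian device (the driver's caam_dma_to_cpu() also handles 32-bit pointers and little-endian CAAM blocks):

    /* Conceptual sketch only, not the driver helper. */
    static inline dma_addr_t ring_desc_to_cpu_sketch(__be64 raw)
    {
            return (dma_addr_t)be64_to_cpu(raw);
    }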

View file

@@ -35,6 +35,7 @@ struct nitrox_cmdq {
         /* requests in backlog queues */
         atomic_t backlog_count;
 
+        int write_idx;
         /* command size 32B/64B */
         u8 instr_size;
         u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
         struct bh_data *slc;
 };
 
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED 0
 #define NITROX_READY 1

View file

@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
         cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
         cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
         cmdq->qsize = (qsize + PKT_IN_ALIGN);
+        cmdq->write_idx = 0;
 
         spin_lock_init(&cmdq->response_lock);
         spin_lock_init(&cmdq->cmdq_lock);

View file

@@ -42,6 +42,16 @@
  * Invalid flag options in AES-CCM IV.
  */
 
+static inline int incr_index(int index, int count, int max)
+{
+        if ((index + count) >= max)
+                index = index + count - max;
+        else
+                index += count;
+
+        return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
                           struct nitrox_cmdq *cmdq)
 {
         struct nitrox_device *ndev = sr->ndev;
-        union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-        u64 offset;
+        int idx;
         u8 *ent;
 
         spin_lock_bh(&cmdq->cmdq_lock);
 
-        /* get the next write offset */
-        offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-        pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+        idx = cmdq->write_idx;
         /* copy the instruction */
-        ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+        ent = cmdq->head + (idx * cmdq->instr_size);
         memcpy(ent, &sr->instr, cmdq->instr_size);
-        /* flush the command queue updates */
-        dma_wmb();
-
-        sr->tstamp = jiffies;
         atomic_set(&sr->status, REQ_POSTED);
         response_list_add(sr, cmdq);
+        sr->tstamp = jiffies;
+
+        /* flush the command queue updates */
+        dma_wmb();
 
         /* Ring doorbell with count 1 */
         writeq(1, cmdq->dbell_csr_addr);
         /* orders the doorbell rings */
         mmiowb();
 
+        cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
         spin_unlock_bh(&cmdq->cmdq_lock);
 }
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
         struct nitrox_softreq *sr, *tmp;
         int ret = 0;
 
+        if (!atomic_read(&cmdq->backlog_count))
+                return 0;
+
         spin_lock_bh(&cmdq->backlog_lock);
         list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
                 /* submit until space available */
                 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-                        ret = -EBUSY;
+                        ret = -ENOSPC;
                         break;
                 }
                 /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
         struct nitrox_cmdq *cmdq = sr->cmdq;
         struct nitrox_device *ndev = sr->ndev;
-        int ret = -EBUSY;
+
+        /* try to post backlog requests */
+        post_backlog_cmds(cmdq);
 
         if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                        return -EAGAIN;
+                        return -ENOSPC;
                 /* add to backlog list */
                 backlog_list_add(sr, cmdq);
-        } else {
-                ret = post_backlog_cmds(cmdq);
-                if (ret) {
-                        backlog_list_add(sr, cmdq);
-                        return ret;
-                }
-                post_se_instr(sr, cmdq);
-                ret = -EINPROGRESS;
+                return -EBUSY;
         }
-        return ret;
+
+        post_se_instr(sr, cmdq);
+        return -EINPROGRESS;
 }
 
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
          */
         sr->instr.fdata[0] = *((u64 *)&req->gph);
         sr->instr.fdata[1] = 0;
-        /* flush the soft_req changes before posting the cmd */
-        wmb();
 
         ret = nitrox_enqueue_request(sr);
-        if (ret == -EAGAIN)
+        if (ret == -ENOSPC)
                 goto send_fail;
 
         return ret;
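Taken together, the nitrox hunks replace the hardware doorbell-offset read with a driver-maintained ring index: cmdq_common_init() starts write_idx at 0, post_se_instr() copies each command to head + idx * instr_size and advances the index with incr_index(), post_backlog_cmds() bails out early when the backlog is empty, and nitrox_enqueue_request() now distinguishes a backlogged request (-EBUSY) from a full queue with backlog disallowed (-ENOSPC). A small usage sketch for the wraparound helper added above (the queue depth is illustrative):

    /* Usage sketch for incr_index(); qlen is an example value. */
    static void write_idx_wrap_sketch(void)
    {
            const int qlen = 1024;          /* example queue depth */
            int idx = qlen - 1;

            idx = incr_index(idx, 1, qlen); /* 1023 + 1 wraps to 0 */
            idx = incr_index(idx, 1, qlen); /* 0 + 1 advances to 1 */
    }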

View file

@@ -96,6 +96,10 @@
         CSK_CONN_INLINE, /* Connection on HW */
 };
 
+enum chtls_cdev_state {
+        CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
         struct sock *lsk;
         struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
         unsigned int send_page_order;
         int max_host_sndbuf;
         struct key_map kmap;
+        unsigned int cdev_state;
 };
 
 struct chtls_hws {

View file

@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
         tlsdev->hash = chtls_create_hash;
         tlsdev->unhash = chtls_destroy_hash;
         tls_register_device(&cdev->tlsdev);
+        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
         struct chtls_dev *cdev, *tmp;
 
         mutex_lock(&cdev_mutex);
-        list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-                chtls_free_uld(cdev);
+        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+                if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+                        chtls_free_uld(cdev);
+        }
         mutex_unlock(&cdev_mutex);
 }

View file

@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                 ret = crypto_skcipher_encrypt(req);
                 skcipher_request_zero(req);
         } else {
-                preempt_disable();
-                pagefault_disable();
-                enable_kernel_vsx();
-
                 blkcipher_walk_init(&walk, dst, src, nbytes);
                 ret = blkcipher_walk_virt(desc, &walk);
                 while ((nbytes = walk.nbytes)) {
+                        preempt_disable();
+                        pagefault_disable();
+                        enable_kernel_vsx();
                         aes_p8_cbc_encrypt(walk.src.virt.addr,
                                            walk.dst.virt.addr,
                                            nbytes & AES_BLOCK_MASK,
                                            &ctx->enc_key, walk.iv, 1);
+                        disable_kernel_vsx();
+                        pagefault_enable();
+                        preempt_enable();
+
                         nbytes &= AES_BLOCK_SIZE - 1;
                         ret = blkcipher_walk_done(desc, &walk, nbytes);
                 }
-
-                disable_kernel_vsx();
-                pagefault_enable();
-                preempt_enable();
         }
 
         return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                 ret = crypto_skcipher_decrypt(req);
                 skcipher_request_zero(req);
         } else {
-                preempt_disable();
-                pagefault_disable();
-                enable_kernel_vsx();
-
                 blkcipher_walk_init(&walk, dst, src, nbytes);
                 ret = blkcipher_walk_virt(desc, &walk);
                 while ((nbytes = walk.nbytes)) {
+                        preempt_disable();
+                        pagefault_disable();
+                        enable_kernel_vsx();
                         aes_p8_cbc_encrypt(walk.src.virt.addr,
                                            walk.dst.virt.addr,
                                            nbytes & AES_BLOCK_MASK,
                                            &ctx->dec_key, walk.iv, 0);
+                        disable_kernel_vsx();
+                        pagefault_enable();
+                        preempt_enable();
+
                         nbytes &= AES_BLOCK_SIZE - 1;
                         ret = blkcipher_walk_done(desc, &walk, nbytes);
                 }
-
-                disable_kernel_vsx();
-                pagefault_enable();
-                preempt_enable();
         }
 
         return ret;

View file

@@ -116,32 +116,39 @@
                 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
                 skcipher_request_zero(req);
         } else {
+                blkcipher_walk_init(&walk, dst, src, nbytes);
+
+                ret = blkcipher_walk_virt(desc, &walk);
+
                 preempt_disable();
                 pagefault_disable();
                 enable_kernel_vsx();
 
-                blkcipher_walk_init(&walk, dst, src, nbytes);
-
-                ret = blkcipher_walk_virt(desc, &walk);
                 iv = walk.iv;
                 memset(tweak, 0, AES_BLOCK_SIZE);
                 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+                disable_kernel_vsx();
+                pagefault_enable();
+                preempt_enable();
+
                 while ((nbytes = walk.nbytes)) {
+                        preempt_disable();
+                        pagefault_disable();
+                        enable_kernel_vsx();
                         if (enc)
                                 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                    nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
                         else
                                 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                    nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+                        disable_kernel_vsx();
+                        pagefault_enable();
+                        preempt_enable();
 
                         nbytes &= AES_BLOCK_SIZE - 1;
                         ret = blkcipher_walk_done(desc, &walk, nbytes);
                 }
-
-                disable_kernel_vsx();
-                pagefault_enable();
-                preempt_enable();
         }
         return ret;
 }
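Both vmx files get the same treatment: enable_kernel_vsx() must run with preemption and page faults disabled, but blkcipher_walk_virt()/blkcipher_walk_done() may sleep, so the atomic section is narrowed to just the assembly call inside the loop instead of wrapping the whole walk. A hedged sketch of the per-block pattern (vsx_crypt_block() is a placeholder for the aes_p8_* routines, not a real symbol):

    /* Sketch of the per-iteration pattern used in the hunks above. */
    while ((nbytes = walk.nbytes)) {
            preempt_disable();
            pagefault_disable();
            enable_kernel_vsx();            /* atomic from here ... */

            vsx_crypt_block(walk.src.virt.addr, walk.dst.virt.addr,
                            nbytes & AES_BLOCK_MASK);

            disable_kernel_vsx();
            pagefault_enable();
            preempt_enable();               /* ... to here: nothing may sleep */

            nbytes &= AES_BLOCK_SIZE - 1;
            ret = blkcipher_walk_done(desc, &walk, nbytes);  /* may sleep, now safe */
    }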