Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:

 - Check for the right CPU feature bit in sm4-ce on arm64.

 - Fix scatterwalk WARN_ON in aes-gcm-ce on arm64.

 - Fix unaligned fault in aesni on x86.

 - Fix potential NULL pointer dereference on exit in chtls.

 - Fix DMA mapping direction for RSA in caam.

 - Fix error path return value for xts setkey in caam.

 - Fix address endianness when DMA unmapping in caam.

 - Fix sleep-in-atomic in vmx.

 - Fix command corruption when queue is full in cavium/nitrox.

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
  crypto: vmx - Fix sleep-in-atomic bugs
  crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
  crypto: aesni - Use unaligned loads from gcm_context_data
  crypto: chtls - fix null dereference chtls_free_uld()
  crypto: arm64/sm4-ce - check for the right CPU feature bit
  crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
  crypto: caam/qi - fix error path in xts setkey
  crypto: caam/jr - fix descriptor DMA unmapping
Committed by Linus Torvalds, 2018-08-29 13:38:39 -07:00
commit b4df50de6a
13 changed files with 144 additions and 106 deletions

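Only the vmx fix ("crypto: vmx - Fix sleep-in-atomic bugs") is shown below. All of its hunks make the same change: blkcipher_walk_virt() and blkcipher_walk_done() may sleep, so they must not run between preempt_disable()/pagefault_disable()/enable_kernel_vsx() and the matching re-enable calls. The fix narrows the non-preemptible region to just the VSX cipher call inside the walk loop. A minimal sketch of the corrected shape, where do_vsx_crypt() is a hypothetical stand-in for the aes_p8_* assembly helpers:

	while ((nbytes = walk.nbytes)) {
		/* The no-sleep region covers only the actual VSX work. */
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();
		do_vsx_crypt(walk.src.virt.addr, walk.dst.virt.addr,
			     nbytes & AES_BLOCK_MASK);
		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();

		/* Sleeping is legal again: blkcipher_walk_done() may
		 * yield the CPU. */
		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, &walk, nbytes);
	}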
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c

@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_encrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;

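The aes_xts.c hunk makes the same change inside the loop, and additionally moves blkcipher_walk_init()/blkcipher_walk_virt() out of the critical section and gives the tweak computation (aes_p8_encrypt() of the IV) its own short VSX region before the loop starts.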
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c

@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 		ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_vsx();
 
-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
 		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			if (enc)
 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						   nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
 			else
 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						   nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
 
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 	return ret;
 }
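Re-entering the VSX/preemption-disabled state on every walk step costs a few extra enable/disable calls per request, but it keeps the non-preemptible window bounded to the cipher routines themselves, which neither fault nor sleep.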