Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (22 commits)
  x86: Fix code patching for paravirt-alternatives on 486
  x86, msr: change msr-reg.o to obj-y, and export its symbols
  x86: Use hard_smp_processor_id() to get apic id for AMD K8 cpus
  x86, sched: Workaround broken sched domain creation for AMD Magny-Cours
  x86, mcheck: Use correct cpumask for shared bank4
  x86, cacheinfo: Fixup L3 cache information for AMD multi-node processors
  x86: Fix CPU llc_shared_map information for AMD Magny-Cours
  x86, msr: Fix msr-reg.S compilation with gas 2.16.1, on 32-bit too
  x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h
  x86, msr: fix msr-reg.S compilation with gas 2.16.1
  x86, msr: Export the register-setting MSR functions via /dev/*/msr
  x86, msr: Create _on_cpu helpers for {rw,wr}msr_safe_regs()
  x86, msr: Have the _safe MSR functions return -EIO, not -EFAULT
  x86, msr: CFI annotations, cleanups for msr-reg.S
  x86, asm: Make _ASM_EXTABLE() usable from assembly code
  x86, asm: Add 32-bit versions of the combined CFI macros
  x86, AMD: Disable wrongly set X86_FEATURE_LAHF_LM CPUID bit
  x86, msr: Rewrite AMD rd/wrmsr variants
  x86, msr: Add rd/wrmsr interfaces with preset registers
  x86: add specific support for Intel Atom architecture
  ...
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-static inline int kernel_fpu_using(void)
-{
-	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
-		return 1;
-	return 0;
-}
-
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (kernel_fpu_using())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
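The repeated change above is mechanical, but the pattern it leaves behind is the interesting part: AES-NI code may only touch SSE state when irq_fpu_usable() says so, must bracket that use with kernel_fpu_begin()/kernel_fpu_end(), and otherwise takes an integer-only software path. A minimal sketch of that pattern follows; it is not part of the commit. my_ctx, aesni_xcrypt() and sw_fallback_crypt() are hypothetical stand-ins, while irq_fpu_usable(), kernel_fpu_begin() and kernel_fpu_end() are the real x86 helpers used in the hunks above.

/*
 * Sketch of the irq_fpu_usable() guard pattern -- not from the commit.
 * my_ctx, aesni_xcrypt() and sw_fallback_crypt() are hypothetical.
 */
#include <linux/types.h>
#include <asm/i387.h>	/* irq_fpu_usable(), kernel_fpu_begin/end() */

struct my_ctx;	/* hypothetical cipher context */

/* hypothetical SSE/AES-NI implementation and plain-C fallback */
void aesni_xcrypt(struct my_ctx *ctx, u8 *dst, const u8 *src);
void sw_fallback_crypt(struct my_ctx *ctx, u8 *dst, const u8 *src);

static void my_crypt(struct my_ctx *ctx, u8 *dst, const u8 *src)
{
	if (!irq_fpu_usable()) {
		/* SSE state cannot be claimed here, e.g. in an interrupt
		 * that preempted FPU-using code: take the integer-only
		 * software path instead. */
		sw_fallback_crypt(ctx, dst, src);
	} else {
		/* Save the current FPU state, run the SSE/AES-NI
		 * implementation, then release the FPU again. */
		kernel_fpu_begin();
		aesni_xcrypt(ctx, dst, src);
		kernel_fpu_end();
	}
}

The asynchronous ablkcipher paths in the last two hunks make the same usability check, but instead of a software fallback they appear to defer the request to cryptd (the memcpy() into the ablkcipher_request_ctx() and re-dispatch), so the AES-NI path still runs later from a context where the FPU is available.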