Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  um, x86: Cast to (u64 *) inside set_64bit()
  x86-32, asm: Directly access per-cpu GDT
  x86-64, asm: Directly access per-cpu IST
  x86, asm: Merge cmpxchg_486_u64() and cmpxchg8b_emu()
  x86, asm: Move cmpxchg emulation code to arch/x86/lib
  x86, asm: Clean up and simplify <asm/cmpxchg.h>
  x86, asm: Clean up and simplify set_64bit()
  x86: Add memory modify constraints to xchg() and cmpxchg()
  x86-64: Simplify loading initial_gs
  x86: Use symbolic MSR names
  x86: Remove redundant K6 MSRs
@@ -104,7 +104,7 @@ _start:
 	movl %eax, %ecx
 	orl %edx, %ecx
 	jz 1f
-	movl $0xc0000080, %ecx
+	movl $MSR_EFER, %ecx
 	wrmsr
 1:
 
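In this hunk (apparently the realmode wakeup path, going by the _start: context), the 'x86: Use symbolic MSR names' commit replaces the magic constant 0xc0000080 with MSR_EFER from <asm/msr-index.h>; the surrounding code skips the wrmsr entirely when the saved EFER image is zero. A minimal C rendering of that guard, as a sketch rather than kernel code:

    /* Sketch only: restore EFER from a saved lo/hi pair (%eax/%edx in
     * the asm above), skipping the write when the saved value is zero. */
    #include <stdint.h>

    #define MSR_EFER 0xc0000080   /* the value behind the symbolic name */

    static inline void wrmsr(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
    }

    static void restore_efer(uint32_t lo, uint32_t hi)
    {
            if ((lo | hi) == 0)             /* orl %edx, %ecx; jz 1f */
                    return;
            wrmsr(MSR_EFER, lo, hi);        /* movl $MSR_EFER, %ecx; wrmsr */
    }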
@@ -16,7 +16,7 @@ obj-y := intel_cacheinfo.o scattered.o topology.o
 obj-y += proc.o capflags.o powerflags.o common.o
 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
 
-obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
+obj-$(CONFIG_X86_32) += bugs.o
 obj-$(CONFIG_X86_64) += bugs_64.o
 
 obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
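This is arch/x86/kernel/cpu/Makefile: cmpxchg.o is dropped here by 'x86, asm: Move cmpxchg emulation code to arch/x86/lib'. The counterpart addition to arch/x86/lib/Makefile is not captured in this view; presumably it looks something like the following (an assumption, not part of this diff):

    # Presumed counterpart in arch/x86/lib/Makefile (not shown in this view):
    lib-y += cmpxchg.o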
@@ -1,72 +0,0 @@
-/*
- * cmpxchg*() fallbacks for CPU not supporting these instructions
- */
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-	u8 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u8 *)ptr;
-	if (prev == old)
-		*(u8 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-	u16 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u16 *)ptr;
-	if (prev == old)
-		*(u16 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-	u32 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg for 386. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u32 *)ptr;
-	if (prev == old)
-		*(u32 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
-
-#ifndef CONFIG_X86_CMPXCHG64
-unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
-{
-	u64 prev;
-	unsigned long flags;
-
-	/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
-	local_irq_save(flags);
-	prev = *(u64 *)ptr;
-	if (prev == old)
-		*(u64 *)ptr = new;
-	local_irq_restore(flags);
-	return prev;
-}
-EXPORT_SYMBOL(cmpxchg_486_u64);
-#endif
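The deleted file is arch/x86/kernel/cpu/cmpxchg.c, relocated to arch/x86/lib by this series. These fallbacks emulate cmpxchg on CPUs lacking the instruction by disabling interrupts around a plain load/compare/store, which is safe only because a 386/486 is uniprocessor: with interrupts off, nothing else can touch the location. Callers typically wrap cmpxchg() in a retry loop; a user-space sketch of that pattern, using GCC's __sync builtin in place of the kernel's cmpxchg() (the function name here is hypothetical):

    /* Illustrative compare-and-swap retry loop built on CAS semantics. */
    #include <stdint.h>

    static void atomic_add_u32(uint32_t *ptr, uint32_t delta)
    {
            uint32_t old, new;

            do {
                    old = *ptr;         /* snapshot the current value */
                    new = old + delta;  /* compute the desired update */
                    /* retry if *ptr changed between snapshot and swap */
            } while (__sync_val_compare_and_swap(ptr, old, new) != old);
    }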
@@ -611,14 +611,14 @@ ldt_ss:
  * compensating for the offset by changing to the ESPFIX segment with
  * a base address that matches for the difference.
  */
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
 	mov %esp, %edx /* load kernel esp */
 	mov PT_OLDESP(%esp), %eax /* load userspace esp */
 	mov %dx, %ax /* eax: new kernel esp */
 	sub %eax, %edx /* offset (low word is 0) */
-	PER_CPU(gdt_page, %ebx)
 	shr $16, %edx
-	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
-	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
+	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
 	pushl $__ESPFIX_SS
 	CFI_ADJUST_CFA_OFFSET 4
 	push %eax /* new kernel esp */
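This hunk is from the 32-bit entry code (entry_32.S), commit 'x86-32, asm: Directly access per-cpu GDT': instead of computing the per-cpu gdt_page address into %ebx via PER_CPU(), the new GDT_ESPFIX_SS macro addresses the descriptor through the per-cpu segment directly, freeing the temporary register. The byte offsets +4 and +7 follow from the x86 segment descriptor layout, where the 32-bit base is split across bytes 2..3 (bits 0..15), byte 4 (bits 16..23) and byte 7 (bits 24..31). A C sketch of the patching the two mov instructions perform:

    /* Sketch: patch bits 16..31 of a GDT descriptor's base field, as the
     * %dl/%dh stores above do. 'desc' points at the 8-byte descriptor. */
    #include <stdint.h>

    static void set_desc_base_high(uint8_t *desc, uint32_t base)
    {
            desc[4] = (uint8_t)(base >> 16);  /* bits 16..23 */
            desc[7] = (uint8_t)(base >> 24);  /* bits 24..31 */
    }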
@@ -791,9 +791,8 @@ ptregs_clone:
  * normal stack and adjusts ESP with the matching offset.
  */
 	/* fixup the stack */
-	PER_CPU(gdt_page, %ebx)
-	mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
-	mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax /* the adjusted stack pointer */
 	pushl $__KERNEL_DS
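The unwind side of the same change: here the high bytes of the descriptor base are read back out to reconstruct the stack pointer. The inverse of the previous sketch:

    /* Sketch: recover bits 16..31 of the descriptor base, as the fixup
     * path does into %al/%ah before the shl $16 and the add of %esp. */
    #include <stdint.h>

    static uint32_t get_desc_base_high(const uint8_t *desc)
    {
            return ((uint32_t)desc[4] << 16) |  /* bits 16..23 */
                   ((uint32_t)desc[7] << 24);   /* bits 24..31 */
    }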
@@ -1065,6 +1065,7 @@ ENTRY(\sym)
 END(\sym)
 .endm
 
+#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
 	INTR_FRAME
@@ -1076,10 +1077,9 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi /* pt_regs pointer */
 	xorl %esi,%esi /* no error code */
-	PER_CPU(init_tss, %r12)
-	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
+	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
+	addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	jmp paranoid_exit /* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
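These two hunks are the 64-bit counterpart ('x86-64, asm: Directly access per-cpu IST', entry_64.S): the new INIT_TSS_IST() macro reaches the per-cpu init_tss through the per-cpu segment directly, eliminating the PER_CPU() address load and the clobbered %r12. The subq/addq of EXCEPTION_STKSZ around the handler call moves the IST entry down one exception stack, so a nested exception on the same vector during the call gets a fresh stack instead of clobbering the live frame. The offset arithmetic in C form:

    /* Sketch of the INIT_TSS_IST(x) offset: the x86-64 TSS holds seven
     * 8-byte IST pointers (ist1..ist7), so entry x lives at
     * TSS_ist + (x - 1) * 8 bytes into the TSS. */
    #include <stddef.h>

    #define TSS_ist 0x24  /* assumed here; the kernel takes it from asm-offsets */

    static size_t init_tss_ist_offset(unsigned int x)
    {
            return TSS_ist + (x - 1) * 8;   /* x is 1-based: ist1..ist7 */
    }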
@@ -234,9 +234,8 @@ ENTRY(secondary_startup_64)
  * init data section till per cpu areas are set up.
  */
 	movl $MSR_GS_BASE,%ecx
-	movq initial_gs(%rip),%rax
-	movq %rax,%rdx
-	shrq $32,%rdx
+	movl initial_gs(%rip),%eax
+	movl initial_gs+4(%rip),%edx
 	wrmsr
 
 	/* esi is pointer to real mode structure with interesting info.
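From head_64.S ('x86-64: Simplify loading initial_gs'): wrmsr consumes its 64-bit operand as the %edx:%eax pair, and x86 is little-endian, so the two halves of initial_gs can be loaded straight into %eax and %edx instead of going through a 64-bit movq plus shrq. The split, spelled out in C:

    /* Sketch of the operand split wrmsr expects: low 32 bits in %eax,
     * high 32 bits in %edx. On little-endian x86, byte offset +0 of a
     * 64-bit variable is the low half and offset +4 the high half. */
    #include <stdint.h>

    static void split_for_wrmsr(uint64_t val, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)val;          /* movl initial_gs(%rip),%eax   */
            *hi = (uint32_t)(val >> 32);  /* movl initial_gs+4(%rip),%edx */
    }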
@@ -31,6 +31,7 @@
  */
 
 #include <asm/cpufeature.h>
+#include <asm/msr-index.h>
 
 verify_cpu:
 	pushfl # Save caller passed flags
@@ -88,7 +89,7 @@ verify_cpu_sse_test:
 	je verify_cpu_sse_ok
 	test %di,%di
 	jz verify_cpu_no_longmode # only try to force SSE on AMD
-	movl $0xc0010015,%ecx # HWCR
+	movl $MSR_K7_HWCR,%ecx
 	rdmsr
 	btr $15,%eax # enable SSE
 	wrmsr
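In the CPU verification code (arch/x86/kernel/verify_cpu_64.S at the time), the newly included <asm/msr-index.h> provides MSR_K7_HWCR, the symbolic name for MSR 0xc0010015, AMD's Hardware Configuration Register. Bit 15 of HWCR disables SSE, so btr $15,%eax clears it to force SSE back on, as the in-line comment says. The read-modify-write as a C sketch:

    /* Sketch of the rdmsr/btr/wrmsr sequence: clear the SSE-disable bit
     * (bit 15) in AMD's HWCR and write the register back. */
    #include <stdint.h>

    #define MSR_K7_HWCR 0xc0010015   /* the value behind the symbolic name */

    static void force_enable_sse(void)
    {
            uint32_t lo, hi;

            asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_K7_HWCR));
            lo &= ~(1u << 15);        /* btr $15,%eax: clear SSE-disable */
            asm volatile("wrmsr" : : "c" (MSR_K7_HWCR), "a" (lo), "d" (hi));
    }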