Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - vDSO and asm entry improvements (Andy Lutomirski)

   - Xen paravirt entry enhancements (Boris Ostrovsky)

   - asm entry labels enhancement (Borislav Petkov)

   - and other misc changes (Thomas Gleixner, me)"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vsdo: Fix build on PARAVIRT_CLOCK=y, KVM_GUEST=n
  Revert "x86/kvm: On KVM re-enable (e.g. after suspend), update clocks"
  x86/entry/64_compat: Make labels local
  x86/platform/uv: Include clocksource.h for clocksource_touch_watchdog()
  x86/vdso: Enable vdso pvclock access on all vdso variants
  x86/vdso: Remove pvclock fixmap machinery
  x86/vdso: Get pvclock data from the vvar VMA instead of the fixmap
  x86, vdso, pvclock: Simplify and speed up the vdso pvclock reader
  x86/kvm: On KVM re-enable (e.g. after suspend), update clocks
  x86/entry/64: Bypass enter_from_user_mode on non-context-tracking boots
  x86/asm: Add asm macros for static keys/jump labels
  x86/asm: Error out if asm/jump_label.h is included inappropriately
  context_tracking: Switch to new static_branch API
  x86/entry, x86/paravirt: Remove the unused usergs_sysret32 PV op
  x86/paravirt: Remove the unused irq_enable_sysexit pv op
  x86/xen: Avoid fast syscall path for Xen PV guests
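Note: several of the commits above ("x86, vdso, pvclock: Simplify and speed up the vdso pvclock reader", "x86/vdso: Get pvclock data from the vvar VMA instead of the fixmap") revolve around how a guest reads the per-vCPU pvclock record published by the hypervisor. The following is an illustrative, stand-alone sketch of that version-counter read protocol, not the kernel's actual vDSO code: the struct layout follows the pvclock ABI, while pvclock_read_ns() and rdtsc_ordered() are local stand-ins for the kernel's helpers, and the 128-bit multiply relies on the GCC/Clang __int128 extension.

#include <stdint.h>

/* Illustrative sketch only -- not the kernel's vDSO implementation.
 * Field layout mirrors the pvclock record the hypervisor publishes per vCPU. */
struct pvclock_vcpu_time_info {
	uint32_t version;		/* odd while the host is mid-update */
	uint32_t pad0;
	uint64_t tsc_timestamp;		/* guest TSC value at system_time */
	uint64_t system_time;		/* nanoseconds at tsc_timestamp */
	uint32_t tsc_to_system_mul;	/* 32.32 fixed-point TSC->ns multiplier */
	int8_t   tsc_shift;		/* pre-shift applied to the TSC delta */
	uint8_t  flags;
	uint8_t  pad[2];
};

static inline uint64_t rdtsc_ordered(void)
{
	uint32_t lo, hi;

	/* lfence keeps the TSC read from being reordered before prior loads */
	__asm__ __volatile__("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Seqcount-style read: retry while the version is odd (update in progress)
 * or changed while we were reading the record. */
static uint64_t pvclock_read_ns(const volatile struct pvclock_vcpu_time_info *pvti)
{
	uint32_t version;
	uint64_t delta, ns;

	do {
		version = pvti->version;
		__asm__ __volatile__("" ::: "memory");	/* don't hoist field reads */

		delta = rdtsc_ordered() - pvti->tsc_timestamp;
		if (pvti->tsc_shift >= 0)
			delta <<= pvti->tsc_shift;
		else
			delta >>= -pvti->tsc_shift;

		/* 128-bit intermediate so the scaled multiply cannot overflow */
		ns = pvti->system_time +
		     (uint64_t)(((unsigned __int128)delta *
				 pvti->tsc_to_system_mul) >> 32);

		__asm__ __volatile__("" ::: "memory");
	} while ((version & 1) || version != pvti->version);

	return ns;
}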
@@ -65,9 +65,6 @@ void common(void) {
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-#ifdef CONFIG_X86_32
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-#endif
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif
@@ -23,7 +23,6 @@ int main(void)
 {
 #ifdef CONFIG_PARAVIRT
 	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
-	OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
 	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
 	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
 	BLANK();
@@ -45,6 +45,11 @@ early_param("no-kvmclock", parse_no_kvmclock);
 static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;
 
+struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
+{
+	return hv_clock;
+}
+
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
  * have elapsed since the hypervisor wrote the data. So we try to account for
@@ -305,7 +310,6 @@ int __init kvm_setup_vsyscall_timeinfo(void)
 {
 #ifdef CONFIG_X86_64
 	int cpu;
-	int ret;
 	u8 flags;
 	struct pvclock_vcpu_time_info *vcpu_time;
 	unsigned int size;
@@ -325,11 +329,6 @@ int __init kvm_setup_vsyscall_timeinfo(void)
 		return 1;
 	}
 
-	if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
-		put_cpu();
-		return ret;
-	}
-
 	put_cpu();
 
 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
@@ -162,10 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		ret = paravirt_patch_ident_64(insnbuf, len);
 
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
-		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
@@ -220,8 +216,6 @@ static u64 native_steal_clock(int cpu)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
-extern void native_usergs_sysret32(void);
 extern void native_usergs_sysret64(void);
 
 static struct resource reserve_ioports = {
@@ -379,13 +373,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 
 	.load_sp0 = native_load_sp0,
 
-#if defined(CONFIG_X86_32)
-	.irq_enable_sysexit = native_irq_enable_sysexit,
-#endif
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_IA32_EMULATION
-	.usergs_sysret32 = native_usergs_sysret32,
-#endif
 	.usergs_sysret64 = native_usergs_sysret64,
 #endif
 	.iret = native_iret,
@@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, restore_fl);
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_cpu_ops, iret);
-		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
@@ -13,9 +13,7 @@ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 
 DEF_NATIVE(, mov32, "mov %edi, %eax");
@@ -55,7 +53,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_irq_ops, irq_enable);
 		PATCH_SITE(pv_irq_ops, irq_disable);
-		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
 		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
 		PATCH_SITE(pv_cpu_ops, swapgs);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
@@ -140,27 +140,3 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 
 	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
 }
-
-#ifdef CONFIG_X86_64
-/*
- * Initialize the generic pvclock vsyscall state. This will allocate
- * a/some page(s) for the per-vcpu pvclock information, set up a
- * fixmap mapping for the page(s)
- */
-
-int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
-				 int size)
-{
-	int idx;
-
-	WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
-
-	for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
-		__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
-			     __pa(i) + (idx*PAGE_SIZE),
-			     PAGE_KERNEL_VVAR);
-	}
-
-	return 0;
-}
-#endif