Merge branch 'x86-xen-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-xen-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (42 commits)
  xen: cache cr0 value to avoid trap'n'emulate for read_cr0
  xen/x86-64: clean up warnings about IST-using traps
  xen/x86-64: fix breakpoints and hardware watchpoints
  xen: reserve Xen start_info rather than e820 reserving
  xen: add FIX_TEXT_POKE to fixmap
  lguest: update lazy mmu changes to match lguest's use of kvm hypercalls
  xen: honour VCPU availability on boot
  xen: add "capabilities" file
  xen: drop kexec bits from /sys/hypervisor since kexec isn't implemented yet
  xen/sys/hypervisor: change writable_pt to features
  xen: add /sys/hypervisor support
  xen/xenbus: export xenbus_dev_changed
  xen: use device model for suspending xenbus devices
  xen: remove suspend_cancel hook
  xen/dev-evtchn: clean up locking in evtchn
  xen: export ioctl headers to userspace
  xen: add /dev/xen/evtchn driver
  xen: add irq_from_evtchn
  xen: clean up gate trap/interrupt constants
  xen: set _PAGE_NX in __supported_pte_mask before pagetable construction
  ...
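One of the pulled-in commits, "xen: cache cr0 value to avoid trap'n'emulate for read_cr0", avoids a hypervisor trap on every read of %cr0 by keeping a per-CPU shadow of the last value written. The stand-alone C sketch below only models that caching idea; the names (cached_cr0, slow_read_cr0 and the *_model helpers) are invented for the illustration and are not the kernel's symbols.

/*
 * Illustrative model of the cr0-caching commit: reading the real
 * control register from a PV guest traps into the hypervisor, so keep
 * a shadow copy that writes update and reads return. User-space sketch
 * only; the kernel keeps the shadow per CPU.
 */
#include <stdio.h>

static unsigned long cached_cr0;          /* per-CPU in the kernel */

static unsigned long slow_read_cr0(void)  /* stands in for trap'n'emulate */
{
	return 0x80050033UL;              /* a typical CR0 value, for the demo */
}

static void xen_write_cr0_model(unsigned long val)
{
	cached_cr0 = val;                 /* refresh the shadow copy */
	/* real code would also hand the new value to the hypervisor here */
}

static unsigned long xen_read_cr0_model(void)
{
	return cached_cr0;                /* no trap: serve from the cache */
}

int main(void)
{
	cached_cr0 = slow_read_cr0();            /* prime the cache once */
	xen_write_cr0_model(cached_cr0 | 0x8);   /* e.g. set CR0.TS */
	printf("cr0 = %#lx\n", xen_read_cr0_model());
	return 0;
}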
@@ -1379,6 +1379,11 @@ END(xen_failsafe_callback)
 paranoidzeroentry_ist debug do_debug DEBUG_STACK
 paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
 paranoiderrorentry stack_segment do_stack_segment
+#ifdef CONFIG_XEN
+zeroentry xen_debug do_debug
+zeroentry xen_int3 do_int3
+errorentry xen_stack_segment do_stack_segment
+#endif
 errorentry general_protection do_general_protection
 errorentry page_fault do_page_fault
 #ifdef CONFIG_X86_MCE
@@ -195,7 +195,7 @@ static void kvm_leave_lazy_mmu(void)
 	struct kvm_para_state *state = kvm_para_state();
 
 	mmu_queue_flush(state);
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	paravirt_leave_lazy_mmu();
 	state->mode = paravirt_get_lazy_mode();
 }
 
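In the kvm.c hunk above, kvm_leave_lazy_mmu() first flushes the queued MMU operations and only then drops out of lazy mode via the new paravirt_leave_lazy_mmu() helper. The following self-contained C sketch models that "queue while lazy, flush on leave" pattern; the queue, flush_queue() and the lazy flag are assumptions made for the demonstration, not KVM's kvm_para_state machinery.

/*
 * Stand-alone model of the "batch while lazy, flush on leave" pattern
 * used by the lazy-MMU hooks. Everything here is illustrative.
 */
#include <stdio.h>

#define QUEUE_MAX 16

static unsigned long queue[QUEUE_MAX];
static int queued;
static int lazy_mmu;                      /* 1 while updates are batched */

static void flush_queue(void)
{
	/* the kernel would hand the whole batch to the hypervisor here */
	for (int i = 0; i < queued; i++)
		printf("apply update %#lx\n", queue[i]);
	queued = 0;
}

static void queue_update(unsigned long op)
{
	if (!lazy_mmu) {                  /* not batching: apply at once */
		printf("apply update %#lx\n", op);
		return;
	}
	if (queued == QUEUE_MAX)          /* queue full: flush early */
		flush_queue();
	queue[queued++] = op;
}

static void enter_lazy_mmu(void) { lazy_mmu = 1; }

static void leave_lazy_mmu(void)
{
	flush_queue();                    /* as in kvm_leave_lazy_mmu(): flush... */
	lazy_mmu = 0;                     /* ...then drop out of lazy mode */
}

int main(void)
{
	enter_lazy_mmu();
	queue_update(0x1000);
	queue_update(0x2000);
	leave_lazy_mmu();                 /* both updates are applied here */
	return 0;
}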
@@ -248,18 +248,16 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LA
 
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-	BUG_ON(preemptible());
+	BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
 
-	__get_cpu_var(paravirt_lazy_mode) = mode;
+	percpu_write(paravirt_lazy_mode, mode);
 }
 
-void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+static void leave_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
-	BUG_ON(preemptible());
+	BUG_ON(percpu_read(paravirt_lazy_mode) != mode);
 
-	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+	percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 }
 
 void paravirt_enter_lazy_mmu(void)
@@ -269,22 +267,36 @@ void paravirt_enter_lazy_mmu(void)
 
 void paravirt_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
-void paravirt_enter_lazy_cpu(void)
+void paravirt_start_context_switch(struct task_struct *prev)
 {
+	BUG_ON(preemptible());
+
+	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
+	}
 	enter_lazy(PARAVIRT_LAZY_CPU);
 }
 
-void paravirt_leave_lazy_cpu(void)
+void paravirt_end_context_switch(struct task_struct *next)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+	BUG_ON(preemptible());
+
+	leave_lazy(PARAVIRT_LAZY_CPU);
+
+	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
+		arch_enter_lazy_mmu_mode();
 }
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
-	return __get_cpu_var(paravirt_lazy_mode);
+	if (in_interrupt())
+		return PARAVIRT_LAZY_NONE;
+
+	return percpu_read(paravirt_lazy_mode);
 }
 
 void arch_flush_lazy_mmu_mode(void)
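The paravirt.c hunk above turns the old lazy-CPU enter/leave pair into paravirt_start_context_switch()/paravirt_end_context_switch(): if the outgoing task was batching MMU updates, the batch is flushed and remembered in a per-task flag, and the incoming task resumes lazy MMU mode if its flag was set. A compact user-space model of that handoff is sketched below; struct task, the lazy_mmu_updates flag and the two switch functions are simplified stand-ins for the kernel's task_struct, TIF_LAZY_MMU_UPDATES and paravirt hooks, not their actual definitions.

/*
 * Simplified model of the context-switch handoff: leave a pending
 * lazy-MMU section on the way out, mark it in a per-task flag, and
 * re-enter it when that task runs again. All names are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum lazy_mode { LAZY_NONE, LAZY_MMU, LAZY_CPU };

struct task {
	const char *name;
	bool lazy_mmu_updates;            /* models TIF_LAZY_MMU_UPDATES */
};

static enum lazy_mode lazy_mode;          /* per-CPU in the kernel */

static void start_context_switch(struct task *prev)
{
	if (lazy_mode == LAZY_MMU) {
		lazy_mode = LAZY_NONE;    /* like arch_leave_lazy_mmu_mode() */
		prev->lazy_mmu_updates = true;
	}
	lazy_mode = LAZY_CPU;             /* like enter_lazy(PARAVIRT_LAZY_CPU) */
}

static void end_context_switch(struct task *next)
{
	lazy_mode = LAZY_NONE;            /* like leave_lazy(PARAVIRT_LAZY_CPU) */
	if (next->lazy_mmu_updates) {
		next->lazy_mmu_updates = false;
		lazy_mode = LAZY_MMU;     /* like arch_enter_lazy_mmu_mode() */
	}
}

int main(void)
{
	struct task a = { "a", false }, b = { "b", false };

	lazy_mode = LAZY_MMU;             /* task a was batching MMU updates */
	start_context_switch(&a);
	end_context_switch(&b);
	printf("after a->b: mode=%d, a pending=%d\n", lazy_mode, a.lazy_mmu_updates);

	start_context_switch(&b);
	end_context_switch(&a);           /* a's lazy MMU section resumes */
	printf("after b->a: mode=%d\n", lazy_mode);
	return 0;
}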
@@ -292,7 +304,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_disable();
 
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		WARN_ON(preempt_count() == 1);
 		arch_leave_lazy_mmu_mode();
 		arch_enter_lazy_mmu_mode();
 	}
@@ -300,19 +311,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_enable();
 }
 
-void arch_flush_lazy_cpu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-		WARN_ON(preempt_count() == 1);
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -404,10 +402,8 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
 
-	.lazy_mode = {
-		.enter = paravirt_nop,
-		.leave = paravirt_nop,
-	},
+	.start_context_switch = paravirt_nop,
+	.end_context_switch = paravirt_nop,
 };
 
 struct pv_apic_ops pv_apic_ops = {
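The pv_cpu_ops hunk above replaces the nested .lazy_mode enter/leave pair with two flat callbacks, .start_context_switch and .end_context_switch, both defaulting to paravirt_nop so bare hardware pays nothing. The short C sketch below models that ops-table pattern under assumed names (struct cpu_ops, backend_start/backend_end): the core always calls through the pointers, and a backend simply overrides the defaults.

/*
 * Model of the callback change: the core exposes two context-switch
 * hooks that default to a no-op, and a backend (Xen, VMI, ...) can
 * override them. Names here are illustrative only.
 */
#include <stdio.h>

struct task;                                   /* opaque for the demo */

struct cpu_ops {
	void (*start_context_switch)(struct task *prev);
	void (*end_context_switch)(struct task *next);
};

static void nop_start(struct task *prev) { (void)prev; }   /* paravirt_nop stand-ins */
static void nop_end(struct task *next)   { (void)next; }

static struct cpu_ops cpu_ops = {
	.start_context_switch = nop_start,     /* bare hardware: do nothing */
	.end_context_switch   = nop_end,
};

static void backend_start(struct task *prev) { (void)prev; printf("backend: start switch\n"); }
static void backend_end(struct task *next)   { (void)next; printf("backend: end switch\n"); }

static void switch_to(struct task *prev, struct task *next)
{
	cpu_ops.start_context_switch(prev);    /* like arch_start_context_switch() */
	/* ... register/stack switch would happen here ... */
	cpu_ops.end_context_switch(next);      /* like arch_end_context_switch() */
}

int main(void)
{
	switch_to(NULL, NULL);                 /* defaults: silent no-ops */

	cpu_ops.start_context_switch = backend_start;   /* a backend overrides */
	cpu_ops.end_context_switch   = backend_end;
	switch_to(NULL, NULL);
	return 0;
}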
@@ -404,7 +404,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch(next_p);
 
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
@@ -425,7 +425,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch(next_p);
 
 	/*
 	 * Switch FS and GS.
@@ -462,22 +462,28 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
-static void vmi_enter_lazy_cpu(void)
+static void vmi_start_context_switch(struct task_struct *prev)
 {
-	paravirt_enter_lazy_cpu();
+	paravirt_start_context_switch(prev);
 	vmi_ops.set_lazy_mode(2);
 }
 
+static void vmi_end_context_switch(struct task_struct *next)
+{
+	vmi_ops.set_lazy_mode(0);
+	paravirt_end_context_switch(next);
+}
+
 static void vmi_enter_lazy_mmu(void)
 {
 	paravirt_enter_lazy_mmu();
 	vmi_ops.set_lazy_mode(1);
 }
 
-static void vmi_leave_lazy(void)
+static void vmi_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	vmi_ops.set_lazy_mode(0);
+	paravirt_leave_lazy_mmu();
 }
 
 static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -711,14 +717,14 @@ static inline int __init activate_vmi(void)
 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
 	para_fill(pv_cpu_ops.io_delay, IODelay);
 
-	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
+	para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
 		  set_lazy_mode, SetLazyMode);
 
 	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */