Merge branch 'core/percpu' into x86/paravirt
@@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-				 unsigned long va)
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+				 struct mm_struct *mm, unsigned long va)
 {
 	struct {
 		struct mmuext_op op;
-		cpumask_t mask;
+		DECLARE_BITMAP(mask, NR_CPUS);
 	} *args;
-	cpumask_t cpumask = *cpus;
 	struct multicall_space mcs;
 
-	/*
-	 * A couple of (to be removed) sanity checks:
-	 *
-	 * - current CPU must not be in mask
-	 * - mask must exist :)
-	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(cpus));
 	BUG_ON(!mm);
 
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (cpus_empty(cpumask))
-		return;
-
 	mcs = xen_mc_entry(sizeof(*args));
 	args = mcs.args;
-	args->mask = cpumask;
-	args->op.arg2.vcpumask = &args->mask;
+	args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+	/* Remove us, and any offline CPUS. */
+	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+	if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+		goto issue;
 
 	if (va == TLB_FLUSH_ALL) {
 		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
+issue:
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -702,17 +695,17 @@ static void xen_write_cr0(unsigned long cr0)
 
 static void xen_write_cr2(unsigned long cr2)
 {
-	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
+	percpu_read(xen_vcpu)->arch.cr2 = cr2;
 }
 
 static unsigned long xen_read_cr2(void)
 {
-	return x86_read_percpu(xen_vcpu)->arch.cr2;
+	return percpu_read(xen_vcpu)->arch.cr2;
 }
 
 static unsigned long xen_read_cr2_direct(void)
 {
-	return x86_read_percpu(xen_vcpu_info.arch.cr2);
+	return percpu_read(xen_vcpu_info.arch.cr2);
 }
 
 static void xen_write_cr4(unsigned long cr4)
@@ -725,12 +718,12 @@ static void xen_write_cr4(unsigned long cr4)
 
 static unsigned long xen_read_cr3(void)
 {
-	return x86_read_percpu(xen_cr3);
+	return percpu_read(xen_cr3);
 }
 
 static void set_current_cr3(void *v)
 {
-	x86_write_percpu(xen_current_cr3, (unsigned long)v);
+	percpu_write(xen_current_cr3, (unsigned long)v);
 }
 
 static void __xen_write_cr3(bool kernel, unsigned long cr3)
@@ -755,7 +748,7 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
 	if (kernel) {
-		x86_write_percpu(xen_cr3, cr3);
+		percpu_write(xen_cr3, cr3);
 
 		/* Update xen_current_cr3 once the batch has actually
 		   been submitted. */
@@ -771,7 +764,7 @@ static void xen_write_cr3(unsigned long cr3)
 
 	/* Update while interrupts are disabled, so its atomic with
 	   respect to ipis */
-	x86_write_percpu(xen_cr3, cr3);
+	percpu_write(xen_cr3, cr3);
 
 	__xen_write_cr3(true, cr3);
 
@@ -1651,7 +1644,6 @@ asmlinkage void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_64
 	/* Disable until direct per-cpu data access. */
 	have_vcpu_info_placement = 0;
-	x86_64_init_pda();
 #endif
 
 	xen_smp_init();
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
 	struct vcpu_info *vcpu;
 	unsigned long flags;
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 
 	/* flag has opposite sense of mask */
 	flags = !vcpu->evtchn_upcall_mask;
@@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = flags;
 	preempt_enable_no_resched();
 
@@ -83,7 +83,7 @@ static void xen_irq_disable(void)
 	   make sure we're don't switch CPUs between getting the vcpu
 	   pointer and updating the mask. */
 	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
 
@@ -96,7 +96,7 @@ static void xen_irq_enable(void)
 	   the caller is confused and is trying to re-enable interrupts
 	   on an indeterminate processor. */
 
-	vcpu = x86_read_percpu(xen_vcpu);
+	vcpu = percpu_read(xen_vcpu);
 	vcpu->evtchn_upcall_mask = 0;
 
 	/* Doesn't matter if we get preempted here, because any
@@ -1063,18 +1063,14 @@ static void drop_other_mm_ref(void *info)
 	struct mm_struct *mm = info;
 	struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-	active_mm = read_pda(active_mm);
-#else
-	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+	active_mm = percpu_read(cpu_tlbstate.active_mm);
 
 	if (active_mm == mm)
 		leave_mm(smp_processor_id());
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
 		load_cr3(swapper_pg_dir);
 		arch_flush_lazy_cpu_mode();
 	}
@@ -39,7 +39,7 @@ static inline void xen_mc_issue(unsigned mode)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */
-	local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+	local_irq_restore(percpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_resched_count++;
-#else
-	add_pda(irq_resched_count, 1);
-#endif
+	inc_irq_stat(irq_resched_count);
 
 	return IRQ_HANDLED;
 }
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
 	xen_setup_cpu_clockevents();
 
 	cpu_set(cpu, cpu_online_map);
-	x86_write_percpu(cpu_state, CPU_ONLINE);
+	percpu_write(cpu_state, CPU_ONLINE);
 	wmb();
 
 	/* We can take interrupts now: we're officially "up". */
@@ -283,22 +279,10 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#ifdef CONFIG_X86_64
-	/* Allocate node local memory for AP pdas */
-	WARN_ON(cpu == 0);
-	if (cpu > 0) {
-		rc = get_local_pda(cpu);
-		if (rc)
-			return rc;
-	}
-#endif
-
-#ifdef CONFIG_X86_32
-	init_gdt(cpu);
 	per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
 	irq_ctx_init(cpu);
 #else
-	cpu_pda(cpu)->pcurrent = idle;
 	clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
 	xen_setup_timer(cpu);
@@ -445,11 +429,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -459,11 +439,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
 	generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-	__get_cpu_var(irq_stat).irq_call_count++;
-#else
-	add_pda(irq_call_count, 1);
-#endif
+	inc_irq_stat(irq_call_count);
 	irq_exit();
 
 	return IRQ_HANDLED;
@@ -6,6 +6,7 @@
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
+#include <asm/fixmap.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -17,6 +17,7 @@
 #include <asm/processor-flags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
+#include <asm/percpu.h>
 
 #include <xen/interface/xen.h>
 
@@ -28,12 +29,10 @@
 
 #if 1
 /*
-	x86-64 does not yet support direct access to percpu variables
-	via a segment override, so we just need to make sure this code
-	never gets used
+	FIXME: x86_64 now can support direct access to percpu variables
+	via a segment override. Update xen accordingly.
 */
 #define BUG ud2a
-#define PER_CPU_VAR(var, off) 0xdeadbeef
 #endif
 
 /*
@@ -45,14 +44,14 @@ ENTRY(xen_irq_enable_direct)
 	BUG
 
 	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 
 	/* Preempt here doesn't matter because that will deal with
 	   any pending interrupts. The pending check may end up being
 	   run on the wrong CPU, but that doesn't hurt. */
 
 	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
 	jz 1f
 
 2:	call check_events
@@ -69,7 +68,7 @@ ENDPATCH(xen_irq_enable_direct)
 ENTRY(xen_irq_disable_direct)
 	BUG
 
-	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 ENDPATCH(xen_irq_disable_direct)
 	ret
 	ENDPROC(xen_irq_disable_direct)
@@ -87,7 +86,7 @@ ENDPATCH(xen_irq_disable_direct)
 ENTRY(xen_save_fl_direct)
 	BUG
 
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
 	addb %ah,%ah
 ENDPATCH(xen_save_fl_direct)
@@ -107,13 +106,13 @@ ENTRY(xen_restore_fl_direct)
 	BUG
 
 	testb $X86_EFLAGS_IF>>8, %ah
-	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	/* Preempt here doesn't matter because that will deal with
 	   any pending interrupts. The pending check may end up being
 	   run on the wrong CPU, but that doesn't hurt. */
 
 	/* check for unmasked and pending */
-	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
 	jz 1f
 2:	call check_events
 1:
@@ -195,11 +194,11 @@ RELOC(xen_sysexit, 1b+1)
 ENTRY(xen_sysret64)
 	/* We're already on the usermode stack at this point, but still
 	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack,%rsp
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack),%rsp
 
 	pushq $__USER_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER_CS
 	pushq %rcx
@@ -212,11 +211,11 @@ RELOC(xen_sysret64, 1b+1)
 ENTRY(xen_sysret32)
 	/* We're already on the usermode stack at this point, but still
 	   with the kernel gs, so we can easily switch back */
-	movq %rsp, %gs:pda_oldrsp
-	movq %gs:pda_kernelstack, %rsp
+	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER32_DS
-	pushq %gs:pda_oldrsp
+	pushq PER_CPU_VAR(old_rsp)
 	pushq %r11
 	pushq $__USER32_CS
 	pushq %rcx