Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
@@ -47,10 +47,10 @@
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
 /* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
 /* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
 /* Interrupt types. */
 enum xen_irq_type {
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
 	exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__get_cpu_var(nesting_count)++)
+		if (__get_cpu_var(xed_nesting_count)++)
 			goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
 	} while(count != 1);
 
 out:
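The first hunk above reflects the per-cpu declaration style used after the percpu rework pulled in by this merge: for per-cpu arrays, the array dimension is given in the type argument of DEFINE_PER_CPU() rather than appended to the variable name. A minimal sketch of the pattern, mirroring the virq_to_irq declaration from the hunk (kernel-build context assumed; NR_VIRQS comes from the Xen interface header, and the lookup helper below is illustrative, not part of this commit):

    #include <linux/percpu.h>
    #include <xen/interface/xen.h>          /* NR_VIRQS */

    /* Old form: static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
     * New form: the array dimension is part of the type, the name stays bare. */
    static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {
            [0 ... NR_VIRQS - 1] = -1
    };

    /* Access is unchanged: per_cpu() yields the given CPU's copy of the array. */
    static int virq_to_irq_on(unsigned int cpu, unsigned int virq)
    {
            return per_cpu(virq_to_irq, cpu)[virq];
    }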