Merge branch 'linus' into x86/urgent

Merge reason: Merge upstream commits to avoid conflicts in upcoming patches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -66,9 +66,9 @@ obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
@@ -109,6 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o

obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o

###
# 64 bit specific files
@@ -595,14 +595,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
nid = acpi_get_node(handle);
if (nid == -1 || !node_online(nid))
return;
#ifdef CONFIG_X86_64
apicid_to_node[physid] = nid;
set_apicid_to_node(physid, nid);
numa_set_node(cpu, nid);
#else /* CONFIG_X86_32 */
apicid_2_node[physid] = nid;
cpu_to_node_map[cpu] = nid;
#endif

#endif
}

@@ -508,64 +508,12 @@ static int apbt_next_event(unsigned long delta,
return 0;
}

/*
* APB timer clock is not in sync with pclk on Langwell, which translates to
* unreliable read value caused by sampling error. the error does not add up
* over time and only happens when sampling a 0 as a 1 by mistake, so the time
* would go backwards. the following code is trying to prevent time traveling
* backwards. little bit paranoid.
*/
static cycle_t apbt_read_clocksource(struct clocksource *cs)
{
unsigned long t0, t1, t2;
static unsigned long last_read;
unsigned long current_count;

bad_count:
t1 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
t2 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
if (unlikely(t1 < t2)) {
pr_debug("APBT: read current count error %lx:%lx:%lx\n",
t1, t2, t2 - t1);
goto bad_count;
}
/*
* check against cached last read, makes sure time does not go back.
* it could be a normal rollover but we will do a triple check anyway
*/
if (unlikely(t2 > last_read)) {
/* check if we have a normal rollover */
unsigned long raw_intr_status =
apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
/*
* cs timer interrupt is masked but raw intr bit is set if
* rollover occurs. then we read EOI reg to clear it.
*/
if (raw_intr_status & (1 << phy_cs_timer_id)) {
apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
goto out;
}
pr_debug("APB CS going back %lx:%lx:%lx ",
t2, last_read, t2 - last_read);
bad_count_x3:
pr_debug("triple check enforced\n");
t0 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
udelay(1);
t1 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
udelay(1);
t2 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
if ((t2 > t1) || (t1 > t0)) {
printk(KERN_ERR "Error: APB CS triple check failed\n");
goto bad_count_x3;
}
}
out:
last_read = t2;
return (cycle_t)~t2;
current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
return (cycle_t)~current_count;
}

static int apbt_clocksource_register(void)
@@ -13,7 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/mmzone.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/pci.h>
|
||||
@@ -57,7 +57,7 @@ static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
|
||||
static u32 __init allocate_aperture(void)
|
||||
{
|
||||
u32 aper_size;
|
||||
void *p;
|
||||
unsigned long addr;
|
||||
|
||||
/* aper_size should <= 1G */
|
||||
if (fallback_aper_order > 5)
|
||||
@@ -83,27 +83,26 @@ static u32 __init allocate_aperture(void)
|
||||
* so don't use 512M below as gart iommu, leave the space for kernel
|
||||
* code for safe
|
||||
*/
|
||||
p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
|
||||
addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
|
||||
if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
|
||||
printk(KERN_ERR
|
||||
"Cannot allocate aperture memory hole (%lx,%uK)\n",
|
||||
addr, aper_size>>10);
|
||||
return 0;
|
||||
}
|
||||
memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
|
||||
/*
|
||||
* Kmemleak should not scan this block as it may not be mapped via the
|
||||
* kernel direct mapping.
|
||||
*/
|
||||
kmemleak_ignore(p);
|
||||
if (!p || __pa(p)+aper_size > 0xffffffff) {
|
||||
printk(KERN_ERR
|
||||
"Cannot allocate aperture memory hole (%p,%uK)\n",
|
||||
p, aper_size>>10);
|
||||
if (p)
|
||||
free_bootmem(__pa(p), aper_size);
|
||||
return 0;
|
||||
}
|
||||
kmemleak_ignore(phys_to_virt(addr));
|
||||
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
|
||||
aper_size >> 10, __pa(p));
|
||||
insert_aperture_resource((u32)__pa(p), aper_size);
|
||||
register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
|
||||
(u32)__pa(p+aper_size) >> PAGE_SHIFT);
|
||||
aper_size >> 10, addr);
|
||||
insert_aperture_resource((u32)addr, aper_size);
|
||||
register_nosave_region(addr >> PAGE_SHIFT,
|
||||
(addr+aper_size) >> PAGE_SHIFT);
|
||||
|
||||
return (u32)__pa(p);
|
||||
return (u32)addr;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -43,6 +43,7 @@
|
||||
#include <asm/i8259.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/desc.h>
|
||||
#include <asm/hpet.h>
|
||||
#include <asm/idle.h>
|
||||
@@ -78,12 +79,21 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
|
||||
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
||||
/*
|
||||
* On x86_32, the mapping between cpu and logical apicid may vary
|
||||
* depending on apic in use. The following early percpu variable is
|
||||
* used for the mapping. This is where the behaviors of x86_64 and 32
|
||||
* actually diverge. Let's keep it ugly for now.
|
||||
*/
|
||||
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
|
||||
|
||||
/*
|
||||
* Knob to control our willingness to enable the local APIC.
|
||||
*
|
||||
* +1=force-enable
|
||||
*/
|
||||
static int force_enable_local_apic;
|
||||
static int force_enable_local_apic __initdata;
|
||||
/*
|
||||
* APIC command line parameters
|
||||
*/
|
||||
@@ -153,7 +163,7 @@ early_param("nox2apic", setup_nox2apic);
|
||||
unsigned long mp_lapic_addr;
|
||||
int disable_apic;
|
||||
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
|
||||
static int disable_apic_timer __cpuinitdata;
|
||||
static int disable_apic_timer __initdata;
|
||||
/* Local APIC timer works in C2 */
|
||||
int local_apic_timer_c2_ok;
|
||||
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
|
||||
@@ -177,29 +187,8 @@ static struct resource lapic_resource = {
|
||||
|
||||
static unsigned int calibration_result;
|
||||
|
||||
static int lapic_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt);
|
||||
static void lapic_timer_setup(enum clock_event_mode mode,
|
||||
struct clock_event_device *evt);
|
||||
static void lapic_timer_broadcast(const struct cpumask *mask);
|
||||
static void apic_pm_activate(void);
|
||||
|
||||
/*
|
||||
* The local apic timer can be used for any function which is CPU local.
|
||||
*/
|
||||
static struct clock_event_device lapic_clockevent = {
|
||||
.name = "lapic",
|
||||
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
|
||||
| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
|
||||
.shift = 32,
|
||||
.set_mode = lapic_timer_setup,
|
||||
.set_next_event = lapic_next_event,
|
||||
.broadcast = lapic_timer_broadcast,
|
||||
.rating = 100,
|
||||
.irq = -1,
|
||||
};
|
||||
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
|
||||
|
||||
static unsigned long apic_phys;
|
||||
|
||||
/*
|
||||
@@ -238,7 +227,7 @@ static int modern_apic(void)
|
||||
* right after this call apic becomes NOOP driven
|
||||
* so apic->write/read doesn't do anything
|
||||
*/
|
||||
void apic_disable(void)
|
||||
static void __init apic_disable(void)
|
||||
{
|
||||
pr_info("APIC: switched to apic NOOP\n");
|
||||
apic = &apic_noop;
|
||||
@@ -282,23 +271,6 @@ u64 native_apic_icr_read(void)
|
||||
return icr1 | ((u64)icr2 << 32);
|
||||
}
|
||||
|
||||
/**
|
||||
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
|
||||
*/
|
||||
void __cpuinit enable_NMI_through_LVT0(void)
|
||||
{
|
||||
unsigned int v;
|
||||
|
||||
/* unmask and set to NMI */
|
||||
v = APIC_DM_NMI;
|
||||
|
||||
/* Level triggered for 82489DX (32bit mode) */
|
||||
if (!lapic_is_integrated())
|
||||
v |= APIC_LVT_LEVEL_TRIGGER;
|
||||
|
||||
apic_write(APIC_LVT0, v);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/**
|
||||
* get_physical_broadcast - Get number of physical broadcast IDs
|
||||
@@ -508,6 +480,23 @@ static void lapic_timer_broadcast(const struct cpumask *mask)
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* The local apic timer can be used for any function which is CPU local.
|
||||
*/
|
||||
static struct clock_event_device lapic_clockevent = {
|
||||
.name = "lapic",
|
||||
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
|
||||
| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
|
||||
.shift = 32,
|
||||
.set_mode = lapic_timer_setup,
|
||||
.set_next_event = lapic_next_event,
|
||||
.broadcast = lapic_timer_broadcast,
|
||||
.rating = 100,
|
||||
.irq = -1,
|
||||
};
|
||||
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
|
||||
|
||||
/*
|
||||
* Setup the local APIC timer for this CPU. Copy the initialized values
|
||||
* of the boot CPU and register the clock event in the framework.
|
||||
@@ -1209,7 +1198,7 @@ void __cpuinit setup_local_APIC(void)
|
||||
rdtscll(tsc);
|
||||
|
||||
if (disable_apic) {
|
||||
arch_disable_smp_support();
|
||||
disable_ioapic_support();
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1237,6 +1226,19 @@ void __cpuinit setup_local_APIC(void)
|
||||
*/
|
||||
apic->init_apic_ldr();
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* APIC LDR is initialized. If logical_apicid mapping was
|
||||
* initialized during get_smp_config(), make sure it matches the
|
||||
* actual value.
|
||||
*/
|
||||
i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
|
||||
/* always use the value from LDR */
|
||||
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
|
||||
logical_smp_processor_id();
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set Task Priority to 'accept all'. We never change this
|
||||
* later on.
|
||||
@@ -1448,7 +1450,7 @@ int __init enable_IR(void)
|
||||
void __init enable_IR_x2apic(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct IO_APIC_route_entry **ioapic_entries = NULL;
|
||||
struct IO_APIC_route_entry **ioapic_entries;
|
||||
int ret, x2apic_enabled = 0;
|
||||
int dmar_table_init_ret;
|
||||
|
||||
@@ -1537,7 +1539,7 @@ static int __init detect_init_APIC(void)
|
||||
}
|
||||
#else
|
||||
|
||||
static int apic_verify(void)
|
||||
static int __init apic_verify(void)
|
||||
{
|
||||
u32 features, h, l;
|
||||
|
||||
@@ -1562,7 +1564,7 @@ static int apic_verify(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int apic_force_enable(void)
|
||||
int __init apic_force_enable(unsigned long addr)
|
||||
{
|
||||
u32 h, l;
|
||||
|
||||
@@ -1578,7 +1580,7 @@ int apic_force_enable(void)
|
||||
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
|
||||
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
|
||||
l &= ~MSR_IA32_APICBASE_BASE;
|
||||
l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
|
||||
l |= MSR_IA32_APICBASE_ENABLE | addr;
|
||||
wrmsr(MSR_IA32_APICBASE, l, h);
|
||||
enabled_via_apicbase = 1;
|
||||
}
|
||||
@@ -1619,7 +1621,7 @@ static int __init detect_init_APIC(void)
|
||||
"you can enable it with \"lapic\"\n");
|
||||
return -1;
|
||||
}
|
||||
if (apic_force_enable())
|
||||
if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
|
||||
return -1;
|
||||
} else {
|
||||
if (apic_verify())
|
||||
@@ -1930,17 +1932,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Validate version
|
||||
*/
|
||||
if (version == 0x0) {
|
||||
pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
|
||||
"fixing up to 0x10. (tell your hw vendor)\n",
|
||||
version);
|
||||
version = 0x10;
|
||||
}
|
||||
apic_version[apicid] = version;
|
||||
|
||||
if (num_processors >= nr_cpu_ids) {
|
||||
int max = nr_cpu_ids;
|
||||
int thiscpu = max + disabled_cpus;
|
||||
@@ -1954,22 +1945,34 @@ void __cpuinit generic_processor_info(int apicid, int version)
|
||||
}
|
||||
|
||||
num_processors++;
|
||||
cpu = cpumask_next_zero(-1, cpu_present_mask);
|
||||
|
||||
if (version != apic_version[boot_cpu_physical_apicid])
|
||||
WARN_ONCE(1,
|
||||
"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
|
||||
apic_version[boot_cpu_physical_apicid], cpu, version);
|
||||
|
||||
physid_set(apicid, phys_cpu_present_map);
|
||||
if (apicid == boot_cpu_physical_apicid) {
|
||||
/*
|
||||
* x86_bios_cpu_apicid is required to have processors listed
|
||||
* in same order as logical cpu numbers. Hence the first
|
||||
* entry is BSP, and so on.
|
||||
* boot_cpu_init() already holds bit 0 in cpu_present_mask
|
||||
* for BSP.
|
||||
*/
|
||||
cpu = 0;
|
||||
} else
|
||||
cpu = cpumask_next_zero(-1, cpu_present_mask);
|
||||
|
||||
/*
|
||||
* Validate version
|
||||
*/
|
||||
if (version == 0x0) {
|
||||
pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
|
||||
cpu, apicid);
|
||||
version = 0x10;
|
||||
}
|
||||
apic_version[apicid] = version;
|
||||
|
||||
if (version != apic_version[boot_cpu_physical_apicid]) {
|
||||
pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
|
||||
apic_version[boot_cpu_physical_apicid], cpu, version);
|
||||
}
|
||||
|
||||
physid_set(apicid, phys_cpu_present_map);
|
||||
if (apicid > max_physical_apicid)
|
||||
max_physical_apicid = apicid;
|
||||
|
||||
@@ -1977,7 +1980,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
|
||||
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
|
||||
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
|
||||
apic->x86_32_early_logical_apicid(cpu);
|
||||
#endif
|
||||
set_cpu_possible(cpu, true);
|
||||
set_cpu_present(cpu, true);
|
||||
}
|
||||
@@ -1998,10 +2004,14 @@ void default_init_apic_ldr(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
int default_apicid_to_node(int logical_apicid)
|
||||
int default_x86_32_numa_cpu_node(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
return apicid_2_node[hard_smp_processor_id()];
|
||||
#ifdef CONFIG_NUMA
|
||||
int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
|
||||
|
||||
if (apicid != BAD_APICID)
|
||||
return __apicid_to_node[apicid];
|
||||
return NUMA_NO_NODE;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
|
@@ -185,8 +185,6 @@ struct apic apic_flat = {
|
||||
.ioapic_phys_id_map = NULL,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = NULL,
|
||||
.cpu_to_logical_apicid = NULL,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = NULL,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -337,8 +335,6 @@ struct apic apic_physflat = {
|
||||
.ioapic_phys_id_map = NULL,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = NULL,
|
||||
.cpu_to_logical_apicid = NULL,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = NULL,
|
||||
.setup_portio_remap = NULL,
|
||||
|
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int noop_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
|
||||
{
|
||||
return 0;
|
||||
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
|
||||
cpumask_set_cpu(cpu, retmask);
|
||||
}
|
||||
|
||||
int noop_apicid_to_node(int logical_apicid)
|
||||
{
|
||||
/* we're always on node 0 */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 noop_apic_read(u32 reg)
|
||||
{
|
||||
WARN_ON_ONCE((cpu_has_apic && !disable_apic));
|
||||
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
|
||||
WARN_ON_ONCE(cpu_has_apic && !disable_apic);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
static int noop_x86_32_numa_cpu_node(int cpu)
|
||||
{
|
||||
/* we're always on node 0 */
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct apic apic_noop = {
|
||||
.name = "noop",
|
||||
.probe = noop_probe,
|
||||
@@ -153,9 +150,7 @@ struct apic apic_noop = {
|
||||
.ioapic_phys_id_map = default_ioapic_phys_id_map,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = noop_apicid_to_node,
|
||||
|
||||
.cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = physid_set_mask_of_physid,
|
||||
|
||||
@@ -197,4 +192,9 @@ struct apic apic_noop = {
|
||||
.icr_write = noop_apic_icr_write,
|
||||
.wait_icr_idle = noop_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node,
|
||||
#endif
|
||||
};
|
||||
|
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int bigsmp_early_logical_apicid(int cpu)
|
||||
{
|
||||
/* on bigsmp, logical apicid is the same as physical */
|
||||
return early_per_cpu(x86_cpu_to_apicid, cpu);
|
||||
}
|
||||
|
||||
static inline unsigned long calculate_ldr(int cpu)
|
||||
{
|
||||
unsigned long val, id;
|
||||
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
|
||||
nr_ioapics);
|
||||
}
|
||||
|
||||
static int bigsmp_apicid_to_node(int logical_apicid)
|
||||
{
|
||||
return apicid_2_node[hard_smp_processor_id()];
|
||||
}
|
||||
|
||||
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
if (mps_cpu < nr_cpu_ids)
|
||||
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
||||
/* Mapping from cpu number to logical apicid */
|
||||
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return BAD_APICID;
|
||||
return cpu_physical_id(cpu);
|
||||
}
|
||||
|
||||
static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
|
||||
{
|
||||
/* For clustered we don't have a good way to do this yet - hack */
|
||||
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
|
||||
/* As we are using single CPU as destination, pick only one CPU here */
|
||||
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
{
|
||||
return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
|
||||
int cpu = cpumask_first(cpumask);
|
||||
|
||||
if (cpu < nr_cpu_ids)
|
||||
return cpu_physical_id(cpu);
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
||||
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
|
||||
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
|
||||
*/
|
||||
for_each_cpu_and(cpu, cpumask, andmask) {
|
||||
if (cpumask_test_cpu(cpu, cpu_online_mask))
|
||||
break;
|
||||
return cpu_physical_id(cpu);
|
||||
}
|
||||
return bigsmp_cpu_to_logical_apicid(cpu);
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
||||
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
|
||||
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
|
||||
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
|
||||
.setup_apic_routing = bigsmp_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = bigsmp_apicid_to_node,
|
||||
.cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = physid_set_mask_of_physid,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
|
||||
};
|
||||
|
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
|
||||
return physid_isset(bit, phys_cpu_present_map);
|
||||
}
|
||||
|
||||
static int es7000_early_logical_apicid(int cpu)
|
||||
{
|
||||
/* on es7000, logical apicid is the same as physical */
|
||||
return early_per_cpu(x86_bios_cpu_apicid, cpu);
|
||||
}
|
||||
|
||||
static unsigned long calculate_ldr(int cpu)
|
||||
{
|
||||
unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
|
||||
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
|
||||
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
|
||||
}
|
||||
|
||||
static int es7000_apicid_to_node(int logical_apicid)
|
||||
static int es7000_numa_cpu_node(int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int es7000_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
if (!mps_cpu)
|
||||
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
|
||||
++cpu_id;
|
||||
}
|
||||
|
||||
/* Mapping from cpu number to logical apicid */
|
||||
static int es7000_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return BAD_APICID;
|
||||
return cpu_2_logical_apicid[cpu];
|
||||
#else
|
||||
return logical_smp_processor_id();
|
||||
#endif
|
||||
}
|
||||
|
||||
static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
|
||||
{
|
||||
/* For clustered we don't have a good way to do this yet - hack */
|
||||
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
* The cpus in the mask must all be on the apic cluster.
|
||||
*/
|
||||
for_each_cpu(cpu, cpumask) {
|
||||
int new_apicid = es7000_cpu_to_logical_apicid(cpu);
|
||||
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
|
||||
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
|
||||
WARN(1, "Not a valid mask!");
|
||||
@@ -578,7 +571,7 @@ static unsigned int
|
||||
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
|
||||
const struct cpumask *andmask)
|
||||
{
|
||||
int apicid = es7000_cpu_to_logical_apicid(0);
|
||||
int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
|
||||
cpumask_var_t cpumask;
|
||||
|
||||
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
|
||||
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
|
||||
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
|
||||
.setup_apic_routing = es7000_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = es7000_apicid_to_node,
|
||||
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
|
||||
};
|
||||
|
||||
struct apic __refdata apic_es7000 = {
|
||||
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
|
||||
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
|
||||
.setup_apic_routing = es7000_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = es7000_apicid_to_node,
|
||||
.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = es7000_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = es7000_numa_cpu_node,
|
||||
};
|
||||
|
@@ -83,7 +83,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
|
||||
arch_spin_lock(&lock);
|
||||
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
|
||||
show_regs(regs);
|
||||
dump_stack();
|
||||
arch_spin_unlock(&lock);
|
||||
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
|
||||
return NOTIFY_STOP;
|
||||
|
@@ -108,7 +108,10 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
|
||||
|
||||
int skip_ioapic_setup;
|
||||
|
||||
void arch_disable_smp_support(void)
|
||||
/**
|
||||
* disable_ioapic_support() - disables ioapic support at runtime
|
||||
*/
|
||||
void disable_ioapic_support(void)
|
||||
{
|
||||
#ifdef CONFIG_PCI
|
||||
noioapicquirk = 1;
|
||||
@@ -120,11 +123,14 @@ void arch_disable_smp_support(void)
|
||||
static int __init parse_noapic(char *str)
|
||||
{
|
||||
/* disable IO-APIC */
|
||||
arch_disable_smp_support();
|
||||
disable_ioapic_support();
|
||||
return 0;
|
||||
}
|
||||
early_param("noapic", parse_noapic);
|
||||
|
||||
static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
|
||||
struct io_apic_irq_attr *attr);
|
||||
|
||||
/* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
|
||||
void mp_save_irq(struct mpc_intsrc *m)
|
||||
{
|
||||
@@ -181,7 +187,7 @@ int __init arch_early_irq_init(void)
|
||||
irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
set_irq_chip_data(i, &cfg[i]);
|
||||
irq_set_chip_data(i, &cfg[i]);
|
||||
zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
|
||||
zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
|
||||
/*
|
||||
@@ -200,7 +206,7 @@ int __init arch_early_irq_init(void)
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
static struct irq_cfg *irq_cfg(unsigned int irq)
|
||||
{
|
||||
return get_irq_chip_data(irq);
|
||||
return irq_get_chip_data(irq);
|
||||
}
|
||||
|
||||
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
|
||||
@@ -226,7 +232,7 @@ static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
|
||||
{
|
||||
if (!cfg)
|
||||
return;
|
||||
set_irq_chip_data(at, NULL);
|
||||
irq_set_chip_data(at, NULL);
|
||||
free_cpumask_var(cfg->domain);
|
||||
free_cpumask_var(cfg->old_domain);
|
||||
kfree(cfg);
|
||||
@@ -256,14 +262,14 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
|
||||
if (res < 0) {
|
||||
if (res != -EEXIST)
|
||||
return NULL;
|
||||
cfg = get_irq_chip_data(at);
|
||||
cfg = irq_get_chip_data(at);
|
||||
if (cfg)
|
||||
return cfg;
|
||||
}
|
||||
|
||||
cfg = alloc_irq_cfg(at, node);
|
||||
if (cfg)
|
||||
set_irq_chip_data(at, cfg);
|
||||
irq_set_chip_data(at, cfg);
|
||||
else
|
||||
irq_free_desc(at);
|
||||
return cfg;
|
||||
@@ -818,7 +824,7 @@ static int EISA_ELCR(unsigned int irq)
|
||||
#define default_MCA_trigger(idx) (1)
|
||||
#define default_MCA_polarity(idx) default_ISA_polarity(idx)
|
||||
|
||||
static int MPBIOS_polarity(int idx)
|
||||
static int irq_polarity(int idx)
|
||||
{
|
||||
int bus = mp_irqs[idx].srcbus;
|
||||
int polarity;
|
||||
@@ -860,7 +866,7 @@ static int MPBIOS_polarity(int idx)
|
||||
return polarity;
|
||||
}
|
||||
|
||||
static int MPBIOS_trigger(int idx)
|
||||
static int irq_trigger(int idx)
|
||||
{
|
||||
int bus = mp_irqs[idx].srcbus;
|
||||
int trigger;
|
||||
@@ -932,16 +938,6 @@ static int MPBIOS_trigger(int idx)
|
||||
return trigger;
|
||||
}
|
||||
|
||||
static inline int irq_polarity(int idx)
|
||||
{
|
||||
return MPBIOS_polarity(idx);
|
||||
}
|
||||
|
||||
static inline int irq_trigger(int idx)
|
||||
{
|
||||
return MPBIOS_trigger(idx);
|
||||
}
|
||||
|
||||
static int pin_2_irq(int idx, int apic, int pin)
|
||||
{
|
||||
int irq;
|
||||
@@ -1189,7 +1185,7 @@ void __setup_vector_irq(int cpu)
|
||||
raw_spin_lock(&vector_lock);
|
||||
/* Mark the inuse vectors */
|
||||
for_each_active_irq(irq) {
|
||||
cfg = get_irq_chip_data(irq);
|
||||
cfg = irq_get_chip_data(irq);
|
||||
if (!cfg)
|
||||
continue;
|
||||
/*
|
||||
@@ -1220,10 +1216,6 @@ void __setup_vector_irq(int cpu)
|
||||
static struct irq_chip ioapic_chip;
|
||||
static struct irq_chip ir_ioapic_chip;
|
||||
|
||||
#define IOAPIC_AUTO -1
|
||||
#define IOAPIC_EDGE 0
|
||||
#define IOAPIC_LEVEL 1
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
static inline int IO_APIC_irq_trigger(int irq)
|
||||
{
|
||||
@@ -1248,35 +1240,31 @@ static inline int IO_APIC_irq_trigger(int irq)
|
||||
}
|
||||
#endif
|
||||
|
||||
static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
|
||||
static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
|
||||
unsigned long trigger)
|
||||
{
|
||||
struct irq_chip *chip = &ioapic_chip;
|
||||
irq_flow_handler_t hdl;
|
||||
bool fasteoi;
|
||||
|
||||
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
|
||||
trigger == IOAPIC_LEVEL)
|
||||
trigger == IOAPIC_LEVEL) {
|
||||
irq_set_status_flags(irq, IRQ_LEVEL);
|
||||
else
|
||||
fasteoi = true;
|
||||
} else {
|
||||
irq_clear_status_flags(irq, IRQ_LEVEL);
|
||||
|
||||
if (irq_remapped(get_irq_chip_data(irq))) {
|
||||
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
|
||||
if (trigger)
|
||||
set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
|
||||
handle_fasteoi_irq,
|
||||
"fasteoi");
|
||||
else
|
||||
set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
|
||||
handle_edge_irq, "edge");
|
||||
return;
|
||||
fasteoi = false;
|
||||
}
|
||||
|
||||
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
|
||||
trigger == IOAPIC_LEVEL)
|
||||
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
||||
handle_fasteoi_irq,
|
||||
"fasteoi");
|
||||
else
|
||||
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
||||
handle_edge_irq, "edge");
|
||||
if (irq_remapped(cfg)) {
|
||||
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
|
||||
chip = &ir_ioapic_chip;
|
||||
fasteoi = trigger != 0;
|
||||
}
|
||||
|
||||
hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
|
||||
irq_set_chip_and_handler_name(irq, chip, hdl,
|
||||
fasteoi ? "fasteoi" : "edge");
|
||||
}
|
||||
|
||||
static int setup_ioapic_entry(int apic_id, int irq,
|
||||
@@ -1374,7 +1362,7 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
|
||||
return;
|
||||
}
|
||||
|
||||
ioapic_register_intr(irq, trigger);
|
||||
ioapic_register_intr(irq, cfg, trigger);
|
||||
if (irq < legacy_pic->nr_legacy_irqs)
|
||||
legacy_pic->mask(irq);
|
||||
|
||||
@@ -1385,33 +1373,26 @@ static struct {
|
||||
DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
|
||||
} mp_ioapic_routing[MAX_IO_APICS];
|
||||
|
||||
static void __init setup_IO_APIC_irqs(void)
|
||||
static bool __init io_apic_pin_not_connected(int idx, int apic_id, int pin)
|
||||
{
|
||||
int apic_id, pin, idx, irq, notcon = 0;
|
||||
int node = cpu_to_node(0);
|
||||
struct irq_cfg *cfg;
|
||||
if (idx != -1)
|
||||
return false;
|
||||
|
||||
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
|
||||
apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
|
||||
mp_ioapics[apic_id].apicid, pin);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void __init __io_apic_setup_irqs(unsigned int apic_id)
|
||||
{
|
||||
int idx, node = cpu_to_node(0);
|
||||
struct io_apic_irq_attr attr;
|
||||
unsigned int pin, irq;
|
||||
|
||||
for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
|
||||
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
|
||||
idx = find_irq_entry(apic_id, pin, mp_INT);
|
||||
if (idx == -1) {
|
||||
if (!notcon) {
|
||||
notcon = 1;
|
||||
apic_printk(APIC_VERBOSE,
|
||||
KERN_DEBUG " %d-%d",
|
||||
mp_ioapics[apic_id].apicid, pin);
|
||||
} else
|
||||
apic_printk(APIC_VERBOSE, " %d-%d",
|
||||
mp_ioapics[apic_id].apicid, pin);
|
||||
if (io_apic_pin_not_connected(idx, apic_id, pin))
|
||||
continue;
|
||||
}
|
||||
if (notcon) {
|
||||
apic_printk(APIC_VERBOSE,
|
||||
" (apicid-pin) not connected\n");
|
||||
notcon = 0;
|
||||
}
|
||||
|
||||
irq = pin_2_irq(idx, apic_id, pin);
|
||||
|
||||
@@ -1423,25 +1404,24 @@ static void __init setup_IO_APIC_irqs(void)
|
||||
* installed and if it returns 1:
|
||||
*/
|
||||
if (apic->multi_timer_check &&
|
||||
apic->multi_timer_check(apic_id, irq))
|
||||
apic->multi_timer_check(apic_id, irq))
|
||||
continue;
|
||||
|
||||
cfg = alloc_irq_and_cfg_at(irq, node);
|
||||
if (!cfg)
|
||||
continue;
|
||||
set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
|
||||
irq_polarity(idx));
|
||||
|
||||
add_pin_to_irq_node(cfg, node, apic_id, pin);
|
||||
/*
|
||||
* don't mark it in pin_programmed, so later acpi could
|
||||
* set it correctly when irq < 16
|
||||
*/
|
||||
setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
|
||||
irq_polarity(idx));
|
||||
io_apic_setup_irq_pin(irq, node, &attr);
|
||||
}
|
||||
}
|
||||
|
||||
if (notcon)
|
||||
apic_printk(APIC_VERBOSE,
|
||||
" (apicid-pin) not connected\n");
|
||||
static void __init setup_IO_APIC_irqs(void)
|
||||
{
|
||||
unsigned int apic_id;
|
||||
|
||||
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
|
||||
|
||||
for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
|
||||
__io_apic_setup_irqs(apic_id);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1452,7 +1432,7 @@ static void __init setup_IO_APIC_irqs(void)
|
||||
void setup_IO_APIC_irq_extra(u32 gsi)
|
||||
{
|
||||
int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
|
||||
struct irq_cfg *cfg;
|
||||
struct io_apic_irq_attr attr;
|
||||
|
||||
/*
|
||||
* Convert 'gsi' to 'ioapic.pin'.
|
||||
@@ -1472,21 +1452,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
|
||||
if (apic_id == 0 || irq < NR_IRQS_LEGACY)
|
||||
return;
|
||||
|
||||
cfg = alloc_irq_and_cfg_at(irq, node);
|
||||
if (!cfg)
|
||||
return;
|
||||
set_io_apic_irq_attr(&attr, apic_id, pin, irq_trigger(idx),
|
||||
irq_polarity(idx));
|
||||
|
||||
add_pin_to_irq_node(cfg, node, apic_id, pin);
|
||||
|
||||
if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
|
||||
pr_debug("Pin %d-%d already programmed\n",
|
||||
mp_ioapics[apic_id].apicid, pin);
|
||||
return;
|
||||
}
|
||||
set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
|
||||
|
||||
setup_ioapic_irq(apic_id, pin, irq, cfg,
|
||||
irq_trigger(idx), irq_polarity(idx));
|
||||
io_apic_setup_irq_pin_once(irq, node, &attr);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1518,7 +1487,8 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
|
||||
* The timer IRQ doesn't have to know that behind the
|
||||
* scene we may have a 8259A-master in AEOI mode ...
|
||||
*/
|
||||
set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
|
||||
irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
|
||||
"edge");
|
||||
|
||||
/*
|
||||
* Add it to the IO-APIC irq-routing table:
|
||||
@@ -1625,7 +1595,7 @@ __apicdebuginit(void) print_IO_APIC(void)
|
||||
for_each_active_irq(irq) {
|
||||
struct irq_pin_list *entry;
|
||||
|
||||
cfg = get_irq_chip_data(irq);
|
||||
cfg = irq_get_chip_data(irq);
|
||||
if (!cfg)
|
||||
continue;
|
||||
entry = cfg->irq_2_pin;
|
||||
@@ -2391,7 +2361,7 @@ static void irq_complete_move(struct irq_cfg *cfg)
|
||||
|
||||
void irq_force_complete_move(int irq)
|
||||
{
|
||||
struct irq_cfg *cfg = get_irq_chip_data(irq);
|
||||
struct irq_cfg *cfg = irq_get_chip_data(irq);
|
||||
|
||||
if (!cfg)
|
||||
return;
|
||||
@@ -2405,7 +2375,7 @@ static inline void irq_complete_move(struct irq_cfg *cfg) { }
|
||||
static void ack_apic_edge(struct irq_data *data)
|
||||
{
|
||||
irq_complete_move(data->chip_data);
|
||||
move_native_irq(data->irq);
|
||||
irq_move_irq(data);
|
||||
ack_APIC_irq();
|
||||
}
|
||||
|
||||
@@ -2462,7 +2432,7 @@ static void ack_apic_level(struct irq_data *data)
|
||||
irq_complete_move(cfg);
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
/* If we are moving the irq we need to mask it */
|
||||
if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
|
||||
if (unlikely(irqd_is_setaffinity_pending(data))) {
|
||||
do_unmask_irq = 1;
|
||||
mask_ioapic(cfg);
|
||||
}
|
||||
@@ -2551,7 +2521,7 @@ static void ack_apic_level(struct irq_data *data)
|
||||
* and you can go talk to the chipset vendor about it.
|
||||
*/
|
||||
if (!io_apic_level_ack_pending(cfg))
|
||||
move_masked_irq(irq);
|
||||
irq_move_masked_irq(data);
|
||||
unmask_ioapic(cfg);
|
||||
}
|
||||
}
|
||||
@@ -2614,7 +2584,7 @@ static inline void init_IO_APIC_traps(void)
|
||||
* 0x80, because int 0x80 is hm, kind of importantish. ;)
|
||||
*/
|
||||
for_each_active_irq(irq) {
|
||||
cfg = get_irq_chip_data(irq);
|
||||
cfg = irq_get_chip_data(irq);
|
||||
if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
|
||||
/*
|
||||
* Hmm.. We don't have an entry for this,
|
||||
@@ -2625,7 +2595,7 @@ static inline void init_IO_APIC_traps(void)
|
||||
legacy_pic->make_irq(irq);
|
||||
else
|
||||
/* Strange. Oh, well.. */
|
||||
set_irq_chip(irq, &no_irq_chip);
|
||||
irq_set_chip(irq, &no_irq_chip);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2665,7 +2635,7 @@ static struct irq_chip lapic_chip __read_mostly = {
|
||||
static void lapic_register_intr(int irq)
|
||||
{
|
||||
irq_clear_status_flags(irq, IRQ_LEVEL);
|
||||
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
|
||||
irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
|
||||
"edge");
|
||||
}
|
||||
|
||||
@@ -2749,7 +2719,7 @@ int timer_through_8259 __initdata;
|
||||
*/
|
||||
static inline void __init check_timer(void)
|
||||
{
|
||||
struct irq_cfg *cfg = get_irq_chip_data(0);
|
||||
struct irq_cfg *cfg = irq_get_chip_data(0);
|
||||
int node = cpu_to_node(0);
|
||||
int apic1, pin1, apic2, pin2;
|
||||
unsigned long flags;
|
||||
@@ -3060,7 +3030,7 @@ unsigned int create_irq_nr(unsigned int from, int node)
|
||||
raw_spin_unlock_irqrestore(&vector_lock, flags);
|
||||
|
||||
if (ret) {
|
||||
set_irq_chip_data(irq, cfg);
|
||||
irq_set_chip_data(irq, cfg);
|
||||
irq_clear_status_flags(irq, IRQ_NOREQUEST);
|
||||
} else {
|
||||
free_irq_at(irq, cfg);
|
||||
@@ -3085,7 +3055,7 @@ int create_irq(void)
|
||||
|
||||
void destroy_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_cfg *cfg = get_irq_chip_data(irq);
|
||||
struct irq_cfg *cfg = irq_get_chip_data(irq);
|
||||
unsigned long flags;
|
||||
|
||||
irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
|
||||
@@ -3119,7 +3089,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
|
||||
|
||||
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
|
||||
|
||||
if (irq_remapped(get_irq_chip_data(irq))) {
|
||||
if (irq_remapped(cfg)) {
|
||||
struct irte irte;
|
||||
int ir_index;
|
||||
u16 sub_handle;
|
||||
@@ -3291,6 +3261,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
|
||||
|
||||
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
|
||||
{
|
||||
struct irq_chip *chip = &msi_chip;
|
||||
struct msi_msg msg;
|
||||
int ret;
|
||||
|
||||
@@ -3298,14 +3269,15 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
set_irq_msi(irq, msidesc);
|
||||
irq_set_msi_desc(irq, msidesc);
|
||||
write_msi_msg(irq, &msg);
|
||||
|
||||
if (irq_remapped(get_irq_chip_data(irq))) {
|
||||
if (irq_remapped(irq_get_chip_data(irq))) {
|
||||
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
|
||||
set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
|
||||
} else
|
||||
set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
|
||||
chip = &msi_ir_chip;
|
||||
}
|
||||
|
||||
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
|
||||
|
||||
dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
|
||||
|
||||
@@ -3423,8 +3395,8 @@ int arch_setup_dmar_msi(unsigned int irq)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
dmar_msi_write(irq, &msg);
|
||||
set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
|
||||
"edge");
|
||||
irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
|
||||
"edge");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@@ -3482,6 +3454,7 @@ static struct irq_chip hpet_msi_type = {
|
||||
|
||||
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
|
||||
{
|
||||
struct irq_chip *chip = &hpet_msi_type;
|
||||
struct msi_msg msg;
|
||||
int ret;
|
||||
|
||||
@@ -3501,15 +3474,12 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
hpet_msi_write(get_irq_data(irq), &msg);
|
||||
hpet_msi_write(irq_get_handler_data(irq), &msg);
|
||||
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
|
||||
if (irq_remapped(get_irq_chip_data(irq)))
|
||||
set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
|
||||
handle_edge_irq, "edge");
|
||||
else
|
||||
set_irq_chip_and_handler_name(irq, &hpet_msi_type,
|
||||
handle_edge_irq, "edge");
|
||||
if (irq_remapped(irq_get_chip_data(irq)))
|
||||
chip = &ir_hpet_msi_type;
|
||||
|
||||
irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
@@ -3596,7 +3566,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
|
||||
|
||||
write_ht_irq_msg(irq, &msg);
|
||||
|
||||
set_irq_chip_and_handler_name(irq, &ht_irq_chip,
|
||||
irq_set_chip_and_handler_name(irq, &ht_irq_chip,
|
||||
handle_edge_irq, "edge");
|
||||
|
||||
dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
|
||||
@@ -3605,7 +3575,40 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
|
||||
}
|
||||
#endif /* CONFIG_HT_IRQ */
|
||||
|
||||
int __init io_apic_get_redir_entries (int ioapic)
|
||||
int
|
||||
io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
|
||||
{
|
||||
struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
|
||||
int ret;
|
||||
|
||||
if (!cfg)
|
||||
return -EINVAL;
|
||||
ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
|
||||
if (!ret)
|
||||
setup_ioapic_irq(attr->ioapic, attr->ioapic_pin, irq, cfg,
|
||||
attr->trigger, attr->polarity);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_apic_setup_irq_pin_once(unsigned int irq, int node,
|
||||
struct io_apic_irq_attr *attr)
|
||||
{
|
||||
unsigned int id = attr->ioapic, pin = attr->ioapic_pin;
|
||||
int ret;
|
||||
|
||||
/* Avoid redundant programming */
|
||||
if (test_bit(pin, mp_ioapic_routing[id].pin_programmed)) {
|
||||
pr_debug("Pin %d-%d already programmed\n",
|
||||
mp_ioapics[id].apicid, pin);
|
||||
return 0;
|
||||
}
|
||||
ret = io_apic_setup_irq_pin(irq, node, attr);
|
||||
if (!ret)
|
||||
set_bit(pin, mp_ioapic_routing[id].pin_programmed);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init io_apic_get_redir_entries(int ioapic)
|
||||
{
|
||||
union IO_APIC_reg_01 reg_01;
|
||||
unsigned long flags;
|
||||
@@ -3659,96 +3662,24 @@ int __init arch_probe_nr_irqs(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int __io_apic_set_pci_routing(struct device *dev, int irq,
|
||||
struct io_apic_irq_attr *irq_attr)
|
||||
int io_apic_set_pci_routing(struct device *dev, int irq,
|
||||
struct io_apic_irq_attr *irq_attr)
|
||||
{
|
||||
struct irq_cfg *cfg;
|
||||
int node;
|
||||
int ioapic, pin;
|
||||
int trigger, polarity;
|
||||
|
||||
ioapic = irq_attr->ioapic;
|
||||
if (!IO_APIC_IRQ(irq)) {
|
||||
apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
|
||||
ioapic);
|
||||
irq_attr->ioapic);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev)
|
||||
node = dev_to_node(dev);
|
||||
else
|
||||
node = cpu_to_node(0);
|
||||
node = dev ? dev_to_node(dev) : cpu_to_node(0);
|
||||
|
||||
cfg = alloc_irq_and_cfg_at(irq, node);
|
||||
if (!cfg)
|
||||
return 0;
|
||||
|
||||
pin = irq_attr->ioapic_pin;
|
||||
trigger = irq_attr->trigger;
|
||||
polarity = irq_attr->polarity;
|
||||
|
||||
/*
|
||||
* IRQs < 16 are already in the irq_2_pin[] map
|
||||
*/
|
||||
if (irq >= legacy_pic->nr_legacy_irqs) {
|
||||
if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
|
||||
printk(KERN_INFO "can not add pin %d for irq %d\n",
|
||||
pin, irq);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int io_apic_set_pci_routing(struct device *dev, int irq,
|
||||
struct io_apic_irq_attr *irq_attr)
|
||||
{
|
||||
int ioapic, pin;
|
||||
/*
|
||||
* Avoid pin reprogramming. PRTs typically include entries
|
||||
* with redundant pin->gsi mappings (but unique PCI devices);
|
||||
* we only program the IOAPIC on the first.
|
||||
*/
|
||||
ioapic = irq_attr->ioapic;
|
||||
pin = irq_attr->ioapic_pin;
|
||||
if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
|
||||
pr_debug("Pin %d-%d already programmed\n",
|
||||
mp_ioapics[ioapic].apicid, pin);
|
||||
return 0;
|
||||
}
|
||||
set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
|
||||
|
||||
return __io_apic_set_pci_routing(dev, irq, irq_attr);
|
||||
}
|
||||
|
||||
u8 __init io_apic_unique_id(u8 id)
|
||||
{
|
||||
#ifdef CONFIG_X86_32
|
||||
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
|
||||
!APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
|
||||
return io_apic_get_unique_id(nr_ioapics, id);
|
||||
else
|
||||
return id;
|
||||
#else
|
||||
int i;
|
||||
DECLARE_BITMAP(used, 256);
|
||||
|
||||
bitmap_zero(used, 256);
|
||||
for (i = 0; i < nr_ioapics; i++) {
|
||||
struct mpc_ioapic *ia = &mp_ioapics[i];
|
||||
__set_bit(ia->apicid, used);
|
||||
}
|
||||
if (!test_bit(id, used))
|
||||
return id;
|
||||
return find_first_zero_bit(used, 256);
|
||||
#endif
|
||||
return io_apic_setup_irq_pin_once(irq, node, irq_attr);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
int __init io_apic_get_unique_id(int ioapic, int apic_id)
|
||||
static int __init io_apic_get_unique_id(int ioapic, int apic_id)
|
||||
{
|
||||
union IO_APIC_reg_00 reg_00;
|
||||
static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
|
||||
@@ -3821,9 +3752,33 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
|
||||
|
||||
return apic_id;
|
||||
}
|
||||
|
||||
static u8 __init io_apic_unique_id(u8 id)
|
||||
{
|
||||
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
|
||||
!APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
|
||||
return io_apic_get_unique_id(nr_ioapics, id);
|
||||
else
|
||||
return id;
|
||||
}
|
||||
#else
|
||||
static u8 __init io_apic_unique_id(u8 id)
|
||||
{
|
||||
int i;
|
||||
DECLARE_BITMAP(used, 256);
|
||||
|
||||
bitmap_zero(used, 256);
|
||||
for (i = 0; i < nr_ioapics; i++) {
|
||||
struct mpc_ioapic *ia = &mp_ioapics[i];
|
||||
__set_bit(ia->apicid, used);
|
||||
}
|
||||
if (!test_bit(id, used))
|
||||
return id;
|
||||
return find_first_zero_bit(used, 256);
|
||||
}
|
||||
#endif
|
||||
|
||||
int __init io_apic_get_version(int ioapic)
|
||||
static int __init io_apic_get_version(int ioapic)
|
||||
{
|
||||
union IO_APIC_reg_01 reg_01;
|
||||
unsigned long flags;
|
||||
@@ -3868,8 +3823,8 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
|
||||
void __init setup_ioapic_dest(void)
|
||||
{
|
||||
int pin, ioapic, irq, irq_entry;
|
||||
struct irq_desc *desc;
|
||||
const struct cpumask *mask;
|
||||
struct irq_data *idata;
|
||||
|
||||
if (skip_ioapic_setup == 1)
|
||||
return;
|
||||
@@ -3884,21 +3839,20 @@ void __init setup_ioapic_dest(void)
|
||||
if ((ioapic > 0) && (irq > 16))
|
||||
continue;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
idata = irq_get_irq_data(irq);
|
||||
|
||||
/*
|
||||
* Honour affinities which have been set in early boot
|
||||
*/
|
||||
if (desc->status &
|
||||
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
|
||||
mask = desc->irq_data.affinity;
|
||||
if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
|
||||
mask = idata->affinity;
|
||||
else
|
||||
mask = apic->target_cpus();
|
||||
|
||||
if (intr_remapping_enabled)
|
||||
ir_ioapic_set_affinity(&desc->irq_data, mask, false);
|
||||
ir_ioapic_set_affinity(idata, mask, false);
|
||||
else
|
||||
ioapic_set_affinity(&desc->irq_data, mask, false);
|
||||
ioapic_set_affinity(idata, mask, false);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -4026,7 +3980,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
|
||||
return gsi - mp_gsi_routing[ioapic].gsi_base;
|
||||
}
|
||||
|
||||
static int bad_ioapic(unsigned long address)
|
||||
static __init int bad_ioapic(unsigned long address)
|
||||
{
|
||||
if (nr_ioapics >= MAX_IO_APICS) {
|
||||
printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
|
||||
@@ -4086,20 +4040,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
|
||||
/* Enable IOAPIC early just for system timer */
|
||||
void __init pre_init_apic_IRQ0(void)
|
||||
{
|
||||
struct irq_cfg *cfg;
|
||||
struct io_apic_irq_attr attr = { 0, 0, 0, 0 };
|
||||
|
||||
printk(KERN_INFO "Early APIC setup for system timer0\n");
|
||||
#ifndef CONFIG_SMP
|
||||
physid_set_mask_of_physid(boot_cpu_physical_apicid,
|
||||
&phys_cpu_present_map);
|
||||
#endif
|
||||
/* Make sure the irq descriptor is set up */
|
||||
cfg = alloc_irq_and_cfg_at(0, 0);
|
||||
|
||||
setup_local_APIC();
|
||||
|
||||
add_pin_to_irq_node(cfg, 0, 0, 0);
|
||||
set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
|
||||
|
||||
setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
|
||||
io_apic_setup_irq_pin(0, 0, &attr);
|
||||
irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
|
||||
"edge");
|
||||
}
|
||||
|
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
||||
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
|
||||
int vector)
|
||||
{
|
||||
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
|
||||
local_irq_save(flags);
|
||||
for_each_cpu(query_cpu, mask)
|
||||
__default_send_IPI_dest_field(
|
||||
apic->cpu_to_logical_apicid(query_cpu), vector,
|
||||
apic->dest_logical);
|
||||
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
|
||||
vector, apic->dest_logical);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
|
||||
if (query_cpu == this_cpu)
|
||||
continue;
|
||||
__default_send_IPI_dest_field(
|
||||
apic->cpu_to_logical_apicid(query_cpu), vector,
|
||||
apic->dest_logical);
|
||||
early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
|
||||
vector, apic->dest_logical);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
||||
/*
|
||||
* This is only used on smaller machines.
|
||||
*/
|
||||
|
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask
|
||||
return physids_promote(0xFUL, retmap);
|
||||
}
|
||||
|
||||
static inline int numaq_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return BAD_APICID;
|
||||
return cpu_2_logical_apicid[cpu];
|
||||
}
|
||||
|
||||
/*
|
||||
* Supporting over 60 cpus on NUMA-Q requires a locality-dependent
|
||||
* cpu to APIC ID relation to properly interact with the intelligent
|
||||
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
|
||||
return logical_apicid >> 4;
|
||||
}
|
||||
|
||||
static int numaq_numa_cpu_node(int cpu)
|
||||
{
|
||||
int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
|
||||
if (logical_apicid != BAD_APICID)
|
||||
return numaq_apicid_to_node(logical_apicid);
|
||||
return NUMA_NO_NODE;
|
||||
}
|
||||
|
||||
static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
|
||||
{
|
||||
int node = numaq_apicid_to_node(logical_apicid);
|
||||
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
|
||||
.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
|
||||
.setup_apic_routing = numaq_setup_apic_routing,
|
||||
.multi_timer_check = numaq_multi_timer_check,
|
||||
.apicid_to_node = numaq_apicid_to_node,
|
||||
.cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = numaq_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = numaq_apicid_to_cpu_present,
|
||||
.setup_portio_remap = numaq_setup_portio_remap,
|
||||
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = numaq_numa_cpu_node,
|
||||
};
|
||||
|
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
|
||||
apic->setup_apic_routing();
|
||||
}
|
||||
|
||||
static int default_x86_32_early_logical_apicid(int cpu)
|
||||
{
|
||||
return 1 << cpu;
|
||||
}
|
||||
|
||||
static void setup_apic_flat_routing(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
@@ -130,8 +135,6 @@ struct apic apic_default = {
|
||||
.ioapic_phys_id_map = default_ioapic_phys_id_map,
|
||||
.setup_apic_routing = setup_apic_flat_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = default_apicid_to_node,
|
||||
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = physid_set_mask_of_physid,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -167,6 +170,9 @@ struct apic apic_default = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = default_x86_32_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
|
||||
};
|
||||
|
||||
extern struct apic apic_numaq;
|
||||
|
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void summit_init_apic_ldr(void)
|
||||
static int summit_early_logical_apicid(int cpu)
|
||||
{
|
||||
unsigned long val, id;
|
||||
int count = 0;
|
||||
u8 my_id = (u8)hard_smp_processor_id();
|
||||
u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
|
||||
u8 my_cluster = APIC_CLUSTER(my_id);
|
||||
#ifdef CONFIG_SMP
|
||||
u8 lid;
|
||||
@@ -206,7 +205,7 @@ static void summit_init_apic_ldr(void)
|
||||
|
||||
/* Create logical APIC IDs by counting CPUs already in cluster. */
|
||||
for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
|
||||
lid = cpu_2_logical_apicid[i];
|
||||
lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
|
||||
if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
|
||||
++count;
|
||||
}
|
||||
@@ -214,7 +213,15 @@ static void summit_init_apic_ldr(void)
|
||||
/* We only have a 4 wide bitmap in cluster mode. If a deranged
|
||||
* BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
|
||||
BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
|
||||
id = my_cluster | (1UL << count);
|
||||
return my_cluster | (1UL << count);
|
||||
}
|
||||
|
||||
static void summit_init_apic_ldr(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
unsigned long val;
|
||||
|
||||
apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
|
||||
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
|
||||
val |= SET_APIC_LOGICAL_ID(id);
|
||||
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
|
||||
nr_ioapics);
|
||||
}
|
||||
|
||||
static int summit_apicid_to_node(int logical_apicid)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
return apicid_2_node[hard_smp_processor_id()];
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Mapping from cpu number to logical apicid */
|
||||
static inline int summit_cpu_to_logical_apicid(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return BAD_APICID;
|
||||
return cpu_2_logical_apicid[cpu];
|
||||
#else
|
||||
return logical_smp_processor_id();
|
||||
#endif
|
||||
}
|
||||
|
||||
static int summit_cpu_present_to_apicid(int mps_cpu)
|
||||
{
|
||||
if (mps_cpu < nr_cpu_ids)
|
||||
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
* The cpus in the mask must all be on the apic cluster.
|
||||
*/
|
||||
for_each_cpu(cpu, cpumask) {
|
||||
int new_apicid = summit_cpu_to_logical_apicid(cpu);
|
||||
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
|
||||
|
||||
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
|
||||
printk("%s: Not a valid mask!\n", __func__);
|
||||
@@ -301,7 +287,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
|
||||
static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
|
||||
const struct cpumask *andmask)
|
||||
{
|
||||
int apicid = summit_cpu_to_logical_apicid(0);
|
||||
int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
|
||||
cpumask_var_t cpumask;
|
||||
|
||||
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
|
||||
@@ -528,8 +514,6 @@ struct apic apic_summit = {
|
||||
.ioapic_phys_id_map = summit_ioapic_phys_id_map,
|
||||
.setup_apic_routing = summit_setup_apic_routing,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = summit_apicid_to_node,
|
||||
.cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
|
||||
.cpu_present_to_apicid = summit_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = summit_apicid_to_cpu_present,
|
||||
.setup_portio_remap = NULL,
|
||||
@@ -565,4 +549,7 @@ struct apic apic_summit = {
|
||||
.icr_write = native_apic_icr_write,
|
||||
.wait_icr_idle = native_apic_wait_icr_idle,
|
||||
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
|
||||
|
||||
.x86_32_early_logical_apicid = summit_early_logical_apicid,
|
||||
.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
|
||||
};
|
||||
|
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
|
||||
.ioapic_phys_id_map = NULL,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = NULL,
|
||||
.cpu_to_logical_apicid = NULL,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = NULL,
|
||||
.setup_portio_remap = NULL,
|
||||
|
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
|
||||
.ioapic_phys_id_map = NULL,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = NULL,
|
||||
.cpu_to_logical_apicid = NULL,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = NULL,
|
||||
.setup_portio_remap = NULL,
|
||||
|
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
|
||||
.ioapic_phys_id_map = NULL,
|
||||
.setup_apic_routing = NULL,
|
||||
.multi_timer_check = NULL,
|
||||
.apicid_to_node = NULL,
|
||||
.cpu_to_logical_apicid = NULL,
|
||||
.cpu_present_to_apicid = default_cpu_present_to_apicid,
|
||||
.apicid_to_cpu_present = NULL,
|
||||
.setup_portio_remap = NULL,
|
||||
|
@@ -1,5 +1,70 @@
|
||||
/*
|
||||
* Generate definitions needed by assembly language modules.
|
||||
* This code generates raw asm output which is post-processed to extract
|
||||
* and format the required data.
|
||||
*/
|
||||
#define COMPILE_OFFSETS
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/kbuild.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/sigframe.h>
|
||||
#include <asm/bootparam.h>
|
||||
#include <asm/suspend.h>
|
||||
|
||||
#ifdef CONFIG_XEN
|
||||
#include <xen/interface/xen.h>
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
# include "asm-offsets_32.c"
|
||||
#else
|
||||
# include "asm-offsets_64.c"
|
||||
#endif
|
||||
|
||||
void common(void) {
|
||||
BLANK();
|
||||
OFFSET(TI_flags, thread_info, flags);
|
||||
OFFSET(TI_status, thread_info, status);
|
||||
OFFSET(TI_addr_limit, thread_info, addr_limit);
|
||||
OFFSET(TI_preempt_count, thread_info, preempt_count);
|
||||
|
||||
BLANK();
|
||||
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
|
||||
|
||||
BLANK();
|
||||
OFFSET(pbe_address, pbe, address);
|
||||
OFFSET(pbe_orig_address, pbe, orig_address);
|
||||
OFFSET(pbe_next, pbe, next);
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
BLANK();
|
||||
OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
|
||||
OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
|
||||
OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
|
||||
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
|
||||
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
|
||||
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
|
||||
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
|
||||
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
|
||||
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_XEN
|
||||
BLANK();
|
||||
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
|
||||
OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
|
||||
#endif
|
||||
|
||||
BLANK();
|
||||
OFFSET(BP_scratch, boot_params, scratch);
|
||||
OFFSET(BP_loadflags, boot_params, hdr.loadflags);
|
||||
OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
|
||||
OFFSET(BP_version, boot_params, hdr.version);
|
||||
OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
|
||||
}
|
||||
|
@@ -1,26 +1,4 @@
|
||||
/*
|
||||
* Generate definitions needed by assembly language modules.
|
||||
* This code generates raw asm output which is post-processed
|
||||
* to extract and format the required data.
|
||||
*/
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/kbuild.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/sigframe.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/bootparam.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/suspend.h>
|
||||
|
||||
#include <xen/interface/xen.h>
|
||||
|
||||
#include <linux/lguest.h>
|
||||
#include "../../../drivers/lguest/lg.h"
|
||||
@@ -51,21 +29,10 @@ void foo(void)
|
||||
OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
|
||||
BLANK();
|
||||
|
||||
OFFSET(TI_task, thread_info, task);
|
||||
OFFSET(TI_exec_domain, thread_info, exec_domain);
|
||||
OFFSET(TI_flags, thread_info, flags);
|
||||
OFFSET(TI_status, thread_info, status);
|
||||
OFFSET(TI_preempt_count, thread_info, preempt_count);
|
||||
OFFSET(TI_addr_limit, thread_info, addr_limit);
|
||||
OFFSET(TI_restart_block, thread_info, restart_block);
|
||||
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
|
||||
OFFSET(TI_cpu, thread_info, cpu);
|
||||
BLANK();
|
||||
|
||||
OFFSET(GDS_size, desc_ptr, size);
|
||||
OFFSET(GDS_address, desc_ptr, address);
|
||||
BLANK();
|
||||
|
||||
OFFSET(PT_EBX, pt_regs, bx);
|
||||
OFFSET(PT_ECX, pt_regs, cx);
|
||||
OFFSET(PT_EDX, pt_regs, dx);
|
||||
@@ -85,42 +52,13 @@ void foo(void)
|
||||
OFFSET(PT_OLDSS, pt_regs, ss);
|
||||
BLANK();
|
||||
|
||||
OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
|
||||
OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
|
||||
BLANK();
|
||||
|
||||
OFFSET(pbe_address, pbe, address);
|
||||
OFFSET(pbe_orig_address, pbe, orig_address);
|
||||
OFFSET(pbe_next, pbe, next);
|
||||
|
||||
/* Offset from the sysenter stack to tss.sp0 */
|
||||
DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
|
||||
sizeof(struct tss_struct));
|
||||
|
||||
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
|
||||
DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
|
||||
DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
|
||||
|
||||
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
|
||||
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
BLANK();
|
||||
OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
|
||||
OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
|
||||
OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
|
||||
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
|
||||
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
|
||||
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
|
||||
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
|
||||
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_XEN
|
||||
BLANK();
|
||||
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
|
||||
OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
|
||||
BLANK();
|
||||
OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
|
||||
@@ -139,11 +77,4 @@ void foo(void)
|
||||
OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
|
||||
OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
|
||||
#endif
|
||||
|
||||
BLANK();
|
||||
OFFSET(BP_scratch, boot_params, scratch);
|
||||
OFFSET(BP_loadflags, boot_params, hdr.loadflags);
|
||||
OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
|
||||
OFFSET(BP_version, boot_params, hdr.version);
|
||||
OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
|
||||
}
|
||||
|
@@ -1,27 +1,4 @@
|
||||
/*
|
||||
* Generate definitions needed by assembly language modules.
|
||||
* This code generates raw asm output which is post-processed to extract
|
||||
* and format the required data.
|
||||
*/
|
||||
#define COMPILE_OFFSETS
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/kbuild.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/segment.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/ia32.h>
|
||||
#include <asm/bootparam.h>
|
||||
#include <asm/suspend.h>
|
||||
|
||||
#include <xen/interface/xen.h>
|
||||
|
||||
#include <asm/sigframe.h>
|
||||
|
||||
#define __NO_STUBS 1
|
||||
#undef __SYSCALL
|
||||
@@ -33,41 +10,19 @@ static char syscalls[] = {
|
||||
|
||||
int main(void)
|
||||
{
|
||||
#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
|
||||
ENTRY(state);
|
||||
ENTRY(flags);
|
||||
ENTRY(pid);
|
||||
BLANK();
|
||||
#undef ENTRY
|
||||
#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
|
||||
ENTRY(flags);
|
||||
ENTRY(addr_limit);
|
||||
ENTRY(preempt_count);
|
||||
ENTRY(status);
|
||||
#ifdef CONFIG_IA32_EMULATION
|
||||
ENTRY(sysenter_return);
|
||||
#endif
|
||||
BLANK();
|
||||
#undef ENTRY
|
||||
#ifdef CONFIG_PARAVIRT
|
||||
BLANK();
|
||||
OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
|
||||
OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
|
||||
OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
|
||||
OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
|
||||
OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
|
||||
OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
|
||||
OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
|
||||
OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
|
||||
OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
|
||||
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
|
||||
OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
|
||||
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
|
||||
BLANK();
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CONFIG_IA32_EMULATION
|
||||
#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
|
||||
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
|
||||
BLANK();
|
||||
|
||||
#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
|
||||
ENTRY(ax);
|
||||
ENTRY(bx);
|
||||
ENTRY(cx);
|
||||
@@ -79,15 +34,12 @@ int main(void)
|
||||
ENTRY(ip);
|
||||
BLANK();
|
||||
#undef ENTRY
|
||||
DEFINE(IA32_RT_SIGFRAME_sigcontext,
|
||||
offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));
|
||||
|
||||
OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
|
||||
BLANK();
|
||||
#endif
|
||||
DEFINE(pbe_address, offsetof(struct pbe, address));
|
||||
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
|
||||
DEFINE(pbe_next, offsetof(struct pbe, next));
|
||||
BLANK();
|
||||
#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
|
||||
|
||||
#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
|
||||
ENTRY(bx);
|
||||
ENTRY(bx);
|
||||
ENTRY(cx);
|
||||
@@ -107,7 +59,8 @@ int main(void)
|
||||
ENTRY(flags);
|
||||
BLANK();
|
||||
#undef ENTRY
|
||||
#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
|
||||
|
||||
#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
|
||||
ENTRY(cr0);
|
||||
ENTRY(cr2);
|
||||
ENTRY(cr3);
|
||||
@@ -115,26 +68,11 @@ int main(void)
|
||||
ENTRY(cr8);
|
||||
BLANK();
|
||||
#undef ENTRY
|
||||
DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
|
||||
BLANK();
|
||||
DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
|
||||
|
||||
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
|
||||
BLANK();
|
||||
|
||||
DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
|
||||
|
||||
BLANK();
|
||||
OFFSET(BP_scratch, boot_params, scratch);
|
||||
OFFSET(BP_loadflags, boot_params, hdr.loadflags);
|
||||
OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
|
||||
OFFSET(BP_version, boot_params, hdr.version);
|
||||
OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
|
||||
|
||||
BLANK();
|
||||
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
|
||||
#ifdef CONFIG_XEN
|
||||
BLANK();
|
||||
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
|
||||
OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
|
||||
#undef ENTRY
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
#ifdef CONFIG_NUMA
|
||||
/*
|
||||
* To workaround broken NUMA config. Read the comment in
|
||||
* srat_detect_node().
|
||||
*/
|
||||
static int __cpuinit nearby_node(int apicid)
|
||||
{
|
||||
int i, node;
|
||||
|
||||
for (i = apicid - 1; i >= 0; i--) {
|
||||
node = apicid_to_node[i];
|
||||
node = __apicid_to_node[i];
|
||||
if (node != NUMA_NO_NODE && node_online(node))
|
||||
return node;
|
||||
}
|
||||
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
|
||||
node = apicid_to_node[i];
|
||||
node = __apicid_to_node[i];
|
||||
if (node != NUMA_NO_NODE && node_online(node))
|
||||
return node;
|
||||
}
|
||||
@@ -338,31 +342,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);
|
||||
|
||||
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
#ifdef CONFIG_NUMA
|
||||
int cpu = smp_processor_id();
|
||||
int node;
|
||||
unsigned apicid = c->apicid;
|
||||
|
||||
node = per_cpu(cpu_llc_id, cpu);
|
||||
node = numa_cpu_node(cpu);
|
||||
if (node == NUMA_NO_NODE)
|
||||
node = per_cpu(cpu_llc_id, cpu);
|
||||
|
||||
if (apicid_to_node[apicid] != NUMA_NO_NODE)
|
||||
node = apicid_to_node[apicid];
|
||||
if (!node_online(node)) {
|
||||
/* Two possibilities here:
|
||||
- The CPU is missing memory and no node was created.
|
||||
In that case try picking one from a nearby CPU
|
||||
- The APIC IDs differ from the HyperTransport node IDs
|
||||
which the K8 northbridge parsing fills in.
|
||||
Assume they are all increased by a constant offset,
|
||||
but in the same order as the HT nodeids.
|
||||
If that doesn't result in a usable node fall back to the
|
||||
path for the previous case. */
|
||||
|
||||
/*
|
||||
* Two possibilities here:
|
||||
*
|
||||
* - The CPU is missing memory and no node was created. In
|
||||
* that case try picking one from a nearby CPU.
|
||||
*
|
||||
* - The APIC IDs differ from the HyperTransport node IDs
|
||||
* which the K8 northbridge parsing fills in. Assume
|
||||
* they are all increased by a constant offset, but in
|
||||
* the same order as the HT nodeids. If that doesn't
|
||||
* result in a usable node fall back to the path for the
|
||||
* previous case.
|
||||
*
|
||||
* This workaround operates directly on the mapping between
|
||||
* APIC ID and NUMA node, assuming certain relationship
|
||||
* between APIC ID, HT node ID and NUMA topology. As going
|
||||
* through CPU mapping may alter the outcome, directly
|
||||
* access __apicid_to_node[].
|
||||
*/
|
||||
int ht_nodeid = c->initial_apicid;
|
||||
|
||||
if (ht_nodeid >= 0 &&
|
||||
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
|
||||
node = apicid_to_node[ht_nodeid];
|
||||
__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
|
||||
node = __apicid_to_node[ht_nodeid];
|
||||
/* Pick a nearby node */
|
||||
if (!node_online(node))
|
||||
node = nearby_node(apicid);
|
||||
|
@@ -675,7 +675,7 @@ void __init early_cpu_init(void)
|
||||
const struct cpu_dev *const *cdev;
|
||||
int count = 0;
|
||||
|
||||
#ifdef PROCESSOR_SELECT
|
||||
#ifdef CONFIG_PROCESSOR_SELECT
|
||||
printk(KERN_INFO "KERNEL supported cpus:\n");
|
||||
#endif
|
||||
|
||||
@@ -687,7 +687,7 @@ void __init early_cpu_init(void)
|
||||
cpu_devs[count] = cpudev;
|
||||
count++;
|
||||
|
||||
#ifdef PROCESSOR_SELECT
|
||||
#ifdef CONFIG_PROCESSOR_SELECT
|
||||
{
|
||||
unsigned int j;
|
||||
|
||||
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
||||
|
||||
select_idle_routine(c);
|
||||
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
#ifdef CONFIG_NUMA
|
||||
numa_add_cpu(smp_processor_id());
|
||||
#endif
|
||||
}
|
||||
|
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
|
||||
|
||||
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
#ifdef CONFIG_NUMA
|
||||
unsigned node;
|
||||
int cpu = smp_processor_id();
|
||||
int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
|
||||
|
||||
/* Don't do the funky fallback heuristics the AMD version employs
|
||||
for now. */
|
||||
node = apicid_to_node[apicid];
|
||||
node = numa_cpu_node(cpu);
|
||||
if (node == NUMA_NO_NODE || !node_online(node)) {
|
||||
/* reuse the value from init_cpu_to_node() */
|
||||
node = cpu_to_node(cpu);
|
||||
|
@@ -768,11 +768,11 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
|
||||
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
|
||||
for_each_cpu(i, c->llc_shared_map) {
|
||||
for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
|
||||
if (!per_cpu(ici_cpuid4_info, i))
|
||||
continue;
|
||||
this_leaf = CPUID4_INFO_IDX(i, index);
|
||||
for_each_cpu(sibling, c->llc_shared_map) {
|
||||
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
|
||||
if (!cpu_online(sibling))
|
||||
continue;
|
||||
set_bit(sibling, this_leaf->shared_cpu_map);
|
||||
|
@@ -527,15 +527,12 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||
int i, err = 0;
|
||||
struct threshold_bank *b = NULL;
|
||||
char name[32];
|
||||
#ifdef CONFIG_SMP
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
#endif
|
||||
|
||||
sprintf(name, "threshold_bank%i", bank);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
|
||||
i = cpumask_first(c->llc_shared_map);
|
||||
i = cpumask_first(cpu_llc_shared_mask(cpu));
|
||||
|
||||
/* first core not up yet */
|
||||
if (cpu_data(i).cpu_core_id)
|
||||
@@ -555,7 +552,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
cpumask_copy(b->cpus, c->llc_shared_map);
|
||||
cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
|
||||
per_cpu(threshold_banks, cpu)[bank] = b;
|
||||
|
||||
goto out;
|
||||
|
@@ -30,6 +30,7 @@
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/compat.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
#if 0
|
||||
#undef wrmsrl
|
||||
@@ -93,6 +94,8 @@ struct amd_nb {
|
||||
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
|
||||
};
|
||||
|
||||
struct intel_percore;
|
||||
|
||||
#define MAX_LBR_ENTRIES 16
|
||||
|
||||
struct cpu_hw_events {
|
||||
@@ -127,6 +130,13 @@ struct cpu_hw_events {
|
||||
struct perf_branch_stack lbr_stack;
|
||||
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
|
||||
|
||||
/*
|
||||
* Intel percore register state.
|
||||
* Coordinate shared resources between HT threads.
|
||||
*/
|
||||
int percore_used; /* Used by this CPU? */
|
||||
struct intel_percore *per_core;
|
||||
|
||||
/*
|
||||
* AMD specific bits
|
||||
*/
|
||||
@@ -166,8 +176,10 @@ struct cpu_hw_events {
|
||||
/*
|
||||
* Constraint on the Event code + UMask
|
||||
*/
|
||||
#define PEBS_EVENT_CONSTRAINT(c, n) \
|
||||
#define INTEL_UEVENT_CONSTRAINT(c, n) \
|
||||
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
|
||||
#define PEBS_EVENT_CONSTRAINT(c, n) \
|
||||
INTEL_UEVENT_CONSTRAINT(c, n)
|
||||
|
||||
#define EVENT_CONSTRAINT_END \
|
||||
EVENT_CONSTRAINT(0, 0, 0)
|
||||
@@ -175,6 +187,28 @@ struct cpu_hw_events {
|
||||
#define for_each_event_constraint(e, c) \
|
||||
for ((e) = (c); (e)->weight; (e)++)
|
||||
|
||||
/*
|
||||
* Extra registers for specific events.
|
||||
* Some events need large masks and require external MSRs.
|
||||
* Define a mapping to these extra registers.
|
||||
*/
|
||||
struct extra_reg {
|
||||
unsigned int event;
|
||||
unsigned int msr;
|
||||
u64 config_mask;
|
||||
u64 valid_mask;
|
||||
};
|
||||
|
||||
#define EVENT_EXTRA_REG(e, ms, m, vm) { \
|
||||
.event = (e), \
|
||||
.msr = (ms), \
|
||||
.config_mask = (m), \
|
||||
.valid_mask = (vm), \
|
||||
}
|
||||
#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
|
||||
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
|
||||
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
|
||||
|
||||
union perf_capabilities {
|
||||
struct {
|
||||
u64 lbr_format : 6;
|
||||
@@ -219,6 +253,7 @@ struct x86_pmu {
|
||||
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
|
||||
struct perf_event *event);
|
||||
struct event_constraint *event_constraints;
|
||||
struct event_constraint *percore_constraints;
|
||||
void (*quirks)(void);
|
||||
int perfctr_second_write;
|
||||
|
||||
@@ -247,6 +282,11 @@ struct x86_pmu {
|
||||
*/
|
||||
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
|
||||
int lbr_nr; /* hardware stack size */
|
||||
|
||||
/*
|
||||
* Extra registers for events
|
||||
*/
|
||||
struct extra_reg *extra_regs;
|
||||
};
|
||||
|
||||
static struct x86_pmu x86_pmu __read_mostly;
|
||||
@@ -271,6 +311,10 @@ static u64 __read_mostly hw_cache_event_ids
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
||||
static u64 __read_mostly hw_cache_extra_regs
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX];
|
||||
|
||||
/*
|
||||
* Propagate event elapsed time into the generic event.
|
||||
@@ -298,7 +342,7 @@ x86_perf_event_update(struct perf_event *event)
|
||||
*/
|
||||
again:
|
||||
prev_raw_count = local64_read(&hwc->prev_count);
|
||||
rdmsrl(hwc->event_base + idx, new_raw_count);
|
||||
rdmsrl(hwc->event_base, new_raw_count);
|
||||
|
||||
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
|
||||
new_raw_count) != prev_raw_count)
|
||||
@@ -321,6 +365,49 @@ again:
|
||||
return new_raw_count;
|
||||
}
|
||||

/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
static inline int x86_pmu_addr_offset(int index)
{
	if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return index << 1;
	return index;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
|
||||
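(Aside, not part of the patch: a minimal sketch of what the helpers above produce, assuming the Family 15h bases used later in this series, MSR_F15H_PERF_CTL and MSR_F15H_PERF_CTR, are laid out as interleaved control/counter pairs so the doubled offset keeps both addresses in step. The function name and the pr_info() output are illustrative only.)

static void __init pmu_addr_offset_example(void)
{
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++)
		pr_info("idx %d: config MSR %#x, counter MSR %#x\n", idx,
			x86_pmu_config_addr(idx),	/* eventsel + 2*idx on Fam15h */
			x86_pmu_event_addr(idx));	/* perfctr  + 2*idx on Fam15h */
}
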
/*
|
||||
* Find and validate any extra registers to set up.
|
||||
*/
|
||||
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
|
||||
{
|
||||
struct extra_reg *er;
|
||||
|
||||
event->hw.extra_reg = 0;
|
||||
event->hw.extra_config = 0;
|
||||
|
||||
if (!x86_pmu.extra_regs)
|
||||
return 0;
|
||||
|
||||
for (er = x86_pmu.extra_regs; er->msr; er++) {
|
||||
if (er->event != (config & er->config_mask))
|
||||
continue;
|
||||
if (event->attr.config1 & ~er->valid_mask)
|
||||
return -EINVAL;
|
||||
event->hw.extra_reg = er->msr;
|
||||
event->hw.extra_config = event->attr.config1;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
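
(Aside, not part of the patch: for a Nehalem offcore event the loop above matches the 0xb7 entry of intel_nehalem_extra_regs, rejects any attr.config1 bits outside the 0xffff valid mask, and stashes MSR_OFFCORE_RSP_0 plus the user's mask in hwc->extra_reg/extra_config for __x86_pmu_enable_event() to write when the event is enabled.)
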
|
||||
|
||||
static atomic_t active_events;
|
||||
static DEFINE_MUTEX(pmc_reserve_mutex);
|
||||
|
||||
@@ -331,12 +418,12 @@ static bool reserve_pmc_hardware(void)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < x86_pmu.num_counters; i++) {
|
||||
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
|
||||
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
|
||||
goto perfctr_fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < x86_pmu.num_counters; i++) {
|
||||
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
|
||||
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
|
||||
goto eventsel_fail;
|
||||
}
|
||||
|
||||
@@ -344,13 +431,13 @@ static bool reserve_pmc_hardware(void)
|
||||
|
||||
eventsel_fail:
|
||||
for (i--; i >= 0; i--)
|
||||
release_evntsel_nmi(x86_pmu.eventsel + i);
|
||||
release_evntsel_nmi(x86_pmu_config_addr(i));
|
||||
|
||||
i = x86_pmu.num_counters;
|
||||
|
||||
perfctr_fail:
|
||||
for (i--; i >= 0; i--)
|
||||
release_perfctr_nmi(x86_pmu.perfctr + i);
|
||||
release_perfctr_nmi(x86_pmu_event_addr(i));
|
||||
|
||||
return false;
|
||||
}
|
||||
@@ -360,8 +447,8 @@ static void release_pmc_hardware(void)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < x86_pmu.num_counters; i++) {
|
||||
release_perfctr_nmi(x86_pmu.perfctr + i);
|
||||
release_evntsel_nmi(x86_pmu.eventsel + i);
|
||||
release_perfctr_nmi(x86_pmu_event_addr(i));
|
||||
release_evntsel_nmi(x86_pmu_config_addr(i));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -382,7 +469,7 @@ static bool check_hw_exists(void)
|
||||
* complain and bail.
|
||||
*/
|
||||
for (i = 0; i < x86_pmu.num_counters; i++) {
|
||||
reg = x86_pmu.eventsel + i;
|
||||
reg = x86_pmu_config_addr(i);
|
||||
ret = rdmsrl_safe(reg, &val);
|
||||
if (ret)
|
||||
goto msr_fail;
|
||||
@@ -407,8 +494,8 @@ static bool check_hw_exists(void)
|
||||
* that don't trap on the MSR access and always return 0s.
|
||||
*/
|
||||
val = 0xabcdUL;
|
||||
ret = checking_wrmsrl(x86_pmu.perfctr, val);
|
||||
ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
|
||||
ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
|
||||
ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
|
||||
if (ret || val != val_new)
|
||||
goto msr_fail;
|
||||
|
||||
@@ -442,8 +529,9 @@ static inline int x86_pmu_initialized(void)
|
||||
}
|
||||
|
||||
static inline int
|
||||
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
|
||||
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
|
||||
{
|
||||
struct perf_event_attr *attr = &event->attr;
|
||||
unsigned int cache_type, cache_op, cache_result;
|
||||
u64 config, val;
|
||||
|
||||
@@ -470,8 +558,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
|
||||
return -EINVAL;
|
||||
|
||||
hwc->config |= val;
|
||||
|
||||
return 0;
|
||||
attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
|
||||
return x86_pmu_extra_regs(val, event);
|
||||
}
|
||||
|
||||
static int x86_setup_perfctr(struct perf_event *event)
|
||||
@@ -496,10 +584,10 @@ static int x86_setup_perfctr(struct perf_event *event)
|
||||
}
|
||||
|
||||
if (attr->type == PERF_TYPE_RAW)
|
||||
return 0;
|
||||
return x86_pmu_extra_regs(event->attr.config, event);
|
||||
|
||||
if (attr->type == PERF_TYPE_HW_CACHE)
|
||||
return set_ext_hw_attr(hwc, attr);
|
||||
return set_ext_hw_attr(hwc, event);
|
||||
|
||||
if (attr->config >= x86_pmu.max_events)
|
||||
return -EINVAL;
|
||||
@@ -617,11 +705,11 @@ static void x86_pmu_disable_all(void)
|
||||
|
||||
if (!test_bit(idx, cpuc->active_mask))
|
||||
continue;
|
||||
rdmsrl(x86_pmu.eventsel + idx, val);
|
||||
rdmsrl(x86_pmu_config_addr(idx), val);
|
||||
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
|
||||
continue;
|
||||
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
|
||||
wrmsrl(x86_pmu.eventsel + idx, val);
|
||||
wrmsrl(x86_pmu_config_addr(idx), val);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -642,21 +730,26 @@ static void x86_pmu_disable(struct pmu *pmu)
|
||||
x86_pmu.disable_all();
|
||||
}
|
||||
|
||||
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
|
||||
u64 enable_mask)
|
||||
{
|
||||
if (hwc->extra_reg)
|
||||
wrmsrl(hwc->extra_reg, hwc->extra_config);
|
||||
wrmsrl(hwc->config_base, hwc->config | enable_mask);
|
||||
}
|
||||
|
||||
static void x86_pmu_enable_all(int added)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
|
||||
struct perf_event *event = cpuc->events[idx];
|
||||
u64 val;
|
||||
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
|
||||
|
||||
if (!test_bit(idx, cpuc->active_mask))
|
||||
continue;
|
||||
|
||||
val = event->hw.config;
|
||||
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
|
||||
wrmsrl(x86_pmu.eventsel + idx, val);
|
||||
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -821,15 +914,10 @@ static inline void x86_assign_hw_event(struct perf_event *event,
|
||||
hwc->event_base = 0;
|
||||
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
|
||||
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
|
||||
/*
|
||||
* We set it so that event_base + idx in wrmsr/rdmsr maps to
|
||||
* MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
|
||||
*/
|
||||
hwc->event_base =
|
||||
MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
|
||||
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
|
||||
} else {
|
||||
hwc->config_base = x86_pmu.eventsel;
|
||||
hwc->event_base = x86_pmu.perfctr;
|
||||
hwc->config_base = x86_pmu_config_addr(hwc->idx);
|
||||
hwc->event_base = x86_pmu_event_addr(hwc->idx);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -915,17 +1003,11 @@ static void x86_pmu_enable(struct pmu *pmu)
|
||||
x86_pmu.enable_all(added);
|
||||
}
|
||||
|
||||
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
|
||||
u64 enable_mask)
|
||||
{
|
||||
wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
|
||||
}
|
||||
|
||||
static inline void x86_pmu_disable_event(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
wrmsrl(hwc->config_base + hwc->idx, hwc->config);
|
||||
wrmsrl(hwc->config_base, hwc->config);
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
|
||||
@@ -978,7 +1060,7 @@ x86_perf_event_set_period(struct perf_event *event)
|
||||
*/
|
||||
local64_set(&hwc->prev_count, (u64)-left);
|
||||
|
||||
wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
|
||||
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
|
||||
|
||||
	/*
	 * Due to erratum on certain cpus we need
@@ -986,7 +1068,7 @@ x86_perf_event_set_period(struct perf_event *event)
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base + idx,
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}
|
||||
|
||||
@@ -1113,8 +1195,8 @@ void perf_event_print_debug(void)
|
||||
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
|
||||
|
||||
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
|
||||
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
|
||||
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
|
||||
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
|
||||
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
|
||||
|
||||
prev_left = per_cpu(pmc_prev_left[idx], cpu);
|
||||
|
||||
@@ -1389,7 +1471,7 @@ static void __init pmu_check_apic(void)
|
||||
pr_info("no hardware sampling interrupt available.\n");
|
||||
}
|
||||
|
||||
int __init init_hw_perf_events(void)
|
||||
static int __init init_hw_perf_events(void)
|
||||
{
|
||||
struct event_constraint *c;
|
||||
int err;
|
||||
@@ -1608,7 +1690,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int x86_pmu_event_init(struct perf_event *event)
|
||||
static int x86_pmu_event_init(struct perf_event *event)
|
||||
{
|
||||
struct pmu *tmp;
|
||||
int err;
|
||||
|
@@ -127,6 +127,11 @@ static int amd_pmu_hw_config(struct perf_event *event)
|
||||
/*
|
||||
* AMD64 events are detected based on their event codes.
|
||||
*/
|
||||
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
|
||||
{
|
||||
return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
|
||||
}
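
(Aside, not part of the patch: the expression above reassembles AMD's 12-bit event select from config bits [7:0] and [35:32]; for example, bits [35:32] = 0x1 together with bits [7:0] = 0xD6 give event code 0x1D6, one of the EX exceptions listed in the Family 15h mapping below.)
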
|
||||
|
||||
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
|
||||
{
|
||||
return (hwc->config & 0xe0) == 0xe0;
|
||||
@@ -385,13 +390,181 @@ static __initconst const struct x86_pmu amd_pmu = {
|
||||
.cpu_dead = amd_pmu_cpu_dead,
|
||||
};
|
||||
|
||||
/* AMD Family 15h */
|
||||
|
||||
#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
|
||||
|
||||
#define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
|
||||
#define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
|
||||
#define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
|
||||
#define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
|
||||
#define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
|
||||
#define AMD_EVENT_EX_LS 0x000000C0ULL
|
||||
#define AMD_EVENT_DE 0x000000D0ULL
|
||||
#define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
|
||||
|
||||
/*
|
||||
* AMD family 15h event code/PMC mappings:
|
||||
*
|
||||
* type = event_code & 0x0F0:
|
||||
*
|
||||
* 0x000 FP PERF_CTL[5:3]
|
||||
* 0x010 FP PERF_CTL[5:3]
|
||||
* 0x020 LS PERF_CTL[5:0]
|
||||
* 0x030 LS PERF_CTL[5:0]
|
||||
* 0x040 DC PERF_CTL[5:0]
|
||||
* 0x050 DC PERF_CTL[5:0]
|
||||
* 0x060 CU PERF_CTL[2:0]
|
||||
* 0x070 CU PERF_CTL[2:0]
|
||||
* 0x080 IC/DE PERF_CTL[2:0]
|
||||
* 0x090 IC/DE PERF_CTL[2:0]
|
||||
* 0x0A0 ---
|
||||
* 0x0B0 ---
|
||||
* 0x0C0 EX/LS PERF_CTL[5:0]
|
||||
* 0x0D0 DE PERF_CTL[2:0]
|
||||
* 0x0E0 NB NB_PERF_CTL[3:0]
|
||||
* 0x0F0 NB NB_PERF_CTL[3:0]
|
||||
*
|
||||
* Exceptions:
|
||||
*
|
||||
* 0x003 FP PERF_CTL[3]
|
||||
* 0x00B FP PERF_CTL[3]
|
||||
* 0x00D FP PERF_CTL[3]
|
||||
* 0x023 DE PERF_CTL[2:0]
|
||||
* 0x02D LS PERF_CTL[3]
|
||||
* 0x02E LS PERF_CTL[3,0]
|
||||
* 0x043 CU PERF_CTL[2:0]
|
||||
* 0x045 CU PERF_CTL[2:0]
|
||||
* 0x046 CU PERF_CTL[2:0]
|
||||
* 0x054 CU PERF_CTL[2:0]
|
||||
* 0x055 CU PERF_CTL[2:0]
|
||||
* 0x08F IC PERF_CTL[0]
|
||||
* 0x187 DE PERF_CTL[0]
|
||||
* 0x188 DE PERF_CTL[0]
|
||||
* 0x0DB EX PERF_CTL[5:0]
|
||||
* 0x0DC LS PERF_CTL[5:0]
|
||||
* 0x0DD LS PERF_CTL[5:0]
|
||||
* 0x0DE LS PERF_CTL[5:0]
|
||||
* 0x0DF LS PERF_CTL[5:0]
|
||||
* 0x1D6 EX PERF_CTL[5:0]
|
||||
* 0x1D8 EX PERF_CTL[5:0]
|
||||
*/
|
||||
|
||||
static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
|
||||
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
|
||||
static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
|
||||
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
|
||||
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
|
||||
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
|
||||
|
||||
static struct event_constraint *
|
||||
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
|
||||
{
|
||||
unsigned int event_code = amd_get_event_code(&event->hw);
|
||||
|
||||
switch (event_code & AMD_EVENT_TYPE_MASK) {
|
||||
case AMD_EVENT_FP:
|
||||
switch (event_code) {
|
||||
case 0x003:
|
||||
case 0x00B:
|
||||
case 0x00D:
|
||||
return &amd_f15_PMC3;
|
||||
default:
|
||||
return &amd_f15_PMC53;
|
||||
}
|
||||
case AMD_EVENT_LS:
|
||||
case AMD_EVENT_DC:
|
||||
case AMD_EVENT_EX_LS:
|
||||
switch (event_code) {
|
||||
case 0x023:
|
||||
case 0x043:
|
||||
case 0x045:
|
||||
case 0x046:
|
||||
case 0x054:
|
||||
case 0x055:
|
||||
return &amd_f15_PMC20;
|
||||
case 0x02D:
|
||||
return &amd_f15_PMC3;
|
||||
case 0x02E:
|
||||
return &amd_f15_PMC30;
|
||||
default:
|
||||
return &amd_f15_PMC50;
|
||||
}
|
||||
case AMD_EVENT_CU:
|
||||
case AMD_EVENT_IC_DE:
|
||||
case AMD_EVENT_DE:
|
||||
switch (event_code) {
|
||||
case 0x08F:
|
||||
case 0x187:
|
||||
case 0x188:
|
||||
return &amd_f15_PMC0;
|
||||
case 0x0DB ... 0x0DF:
|
||||
case 0x1D6:
|
||||
case 0x1D8:
|
||||
return &amd_f15_PMC50;
|
||||
default:
|
||||
return &amd_f15_PMC20;
|
||||
}
|
||||
case AMD_EVENT_NB:
|
||||
/* not yet implemented */
|
||||
return &emptyconstraint;
|
||||
default:
|
||||
return &emptyconstraint;
|
||||
}
|
||||
}
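
(Aside, not part of the patch: tracing the mapping above, event code 0x02E is an LS event listed as an exception restricted to PERF_CTL[3,0], so amd_get_event_constraints_f15h() returns &amd_f15_PMC30, whose counter mask 0x09 selects exactly counters 3 and 0.)
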
|
||||
|
||||
static __initconst const struct x86_pmu amd_pmu_f15h = {
|
||||
.name = "AMD Family 15h",
|
||||
.handle_irq = x86_pmu_handle_irq,
|
||||
.disable_all = x86_pmu_disable_all,
|
||||
.enable_all = x86_pmu_enable_all,
|
||||
.enable = x86_pmu_enable_event,
|
||||
.disable = x86_pmu_disable_event,
|
||||
.hw_config = amd_pmu_hw_config,
|
||||
.schedule_events = x86_schedule_events,
|
||||
.eventsel = MSR_F15H_PERF_CTL,
|
||||
.perfctr = MSR_F15H_PERF_CTR,
|
||||
.event_map = amd_pmu_event_map,
|
||||
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
|
||||
.num_counters = 6,
|
||||
.cntval_bits = 48,
|
||||
.cntval_mask = (1ULL << 48) - 1,
|
||||
.apic = 1,
|
||||
/* use highest bit to detect overflow */
|
||||
.max_period = (1ULL << 47) - 1,
|
||||
.get_event_constraints = amd_get_event_constraints_f15h,
|
||||
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
|
||||
};
|
||||
|
||||
static __init int amd_pmu_init(void)
|
||||
{
|
||||
/* Performance-monitoring supported from K7 and later: */
|
||||
if (boot_cpu_data.x86 < 6)
|
||||
return -ENODEV;
|
||||
|
||||
x86_pmu = amd_pmu;
|
||||
/*
|
||||
* If core performance counter extensions exists, it must be
|
||||
* family 15h, otherwise fail. See x86_pmu_addr_offset().
|
||||
*/
|
||||
switch (boot_cpu_data.x86) {
|
||||
case 0x15:
|
||||
if (!cpu_has_perfctr_core)
|
||||
return -ENODEV;
|
||||
x86_pmu = amd_pmu_f15h;
|
||||
break;
|
||||
default:
|
||||
if (cpu_has_perfctr_core)
|
||||
return -ENODEV;
|
||||
x86_pmu = amd_pmu;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Events are common for all AMDs */
|
||||
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
|
||||
|
@@ -1,5 +1,27 @@
|
||||
#ifdef CONFIG_CPU_SUP_INTEL
|
||||
|
||||
#define MAX_EXTRA_REGS 2
|
||||
|
||||
/*
|
||||
* Per register state.
|
||||
*/
|
||||
struct er_account {
|
||||
int ref; /* reference count */
|
||||
unsigned int extra_reg; /* extra MSR number */
|
||||
u64 extra_config; /* extra MSR config */
|
||||
};
|
||||
|
||||
/*
|
||||
* Per core state
|
||||
* This used to coordinate shared registers for HT threads.
|
||||
*/
|
||||
struct intel_percore {
|
||||
raw_spinlock_t lock; /* protect structure */
|
||||
struct er_account regs[MAX_EXTRA_REGS];
|
||||
int refcnt; /* number of threads */
|
||||
unsigned core_id;
|
||||
};
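
(Aside, not part of the patch: the accounting is easiest to see with two HT siblings requesting OFFCORE_RSP_0. The first thread claims a free er_account slot and sets ref = 1; the second shares the slot only if it programs the same extra_config, otherwise intel_percore_constraints() further down returns the empty constraint and the conflicting event cannot be scheduled at the same time.)
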
|
||||
|
||||
/*
|
||||
* Intel PerfMon, used on Core and later.
|
||||
*/
|
||||
@@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] =
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
static struct extra_reg intel_nehalem_extra_regs[] =
|
||||
{
|
||||
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
|
||||
EVENT_EXTRA_END
|
||||
};
|
||||
|
||||
static struct event_constraint intel_nehalem_percore_constraints[] =
|
||||
{
|
||||
INTEL_EVENT_CONSTRAINT(0xb7, 0),
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
static struct event_constraint intel_westmere_event_constraints[] =
|
||||
{
|
||||
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
|
||||
@@ -76,6 +110,33 @@ static struct event_constraint intel_westmere_event_constraints[] =
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
static struct event_constraint intel_snb_event_constraints[] =
|
||||
{
|
||||
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
|
||||
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
|
||||
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
|
||||
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
|
||||
INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
|
||||
INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
|
||||
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
|
||||
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
static struct extra_reg intel_westmere_extra_regs[] =
|
||||
{
|
||||
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
|
||||
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
|
||||
EVENT_EXTRA_END
|
||||
};
|
||||
|
||||
static struct event_constraint intel_westmere_percore_constraints[] =
|
||||
{
|
||||
INTEL_EVENT_CONSTRAINT(0xb7, 0),
|
||||
INTEL_EVENT_CONSTRAINT(0xbb, 0),
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
||||
static struct event_constraint intel_gen_event_constraints[] =
|
||||
{
|
||||
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
|
||||
@@ -89,6 +150,106 @@ static u64 intel_pmu_event_map(int hw_event)
|
||||
return intel_perfmon_event_map[hw_event];
|
||||
}
|
||||
|
||||
static __initconst const u64 snb_hw_cache_event_ids
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
||||
{
|
||||
[ C(L1D) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
|
||||
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
|
||||
[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0,
|
||||
[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
|
||||
},
|
||||
},
|
||||
[ C(L1I ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0,
|
||||
[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
[ C(RESULT_MISS) ] = -1,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0,
|
||||
[ C(RESULT_MISS) ] = 0x0,
|
||||
},
|
||||
},
|
||||
[ C(LL ) ] = {
|
||||
/*
|
||||
* TBD: Need Off-core Response Performance Monitoring support
|
||||
*/
|
||||
[ C(OP_READ) ] = {
|
||||
/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01bb,
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01bb,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01bb,
|
||||
},
|
||||
},
|
||||
[ C(DTLB) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
|
||||
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
|
||||
[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0,
|
||||
[ C(RESULT_MISS) ] = 0x0,
|
||||
},
|
||||
},
|
||||
[ C(ITLB) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
|
||||
[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
[ C(RESULT_MISS) ] = -1,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
[ C(RESULT_MISS) ] = -1,
|
||||
},
|
||||
},
|
||||
[ C(BPU ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
|
||||
[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
[ C(RESULT_MISS) ] = -1,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = -1,
|
||||
[ C(RESULT_MISS) ] = -1,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static __initconst const u64 westmere_hw_cache_event_ids
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
@@ -124,16 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids
|
||||
},
|
||||
[ C(LL ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
|
||||
[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
|
||||
/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01bb,
|
||||
},
|
||||
/*
|
||||
* Use RFO, not WRITEBACK, because a write miss would typically occur
|
||||
* on RFO.
|
||||
*/
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
|
||||
[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
|
||||
/* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01bb,
|
||||
/* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01b7,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
|
||||
[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
|
||||
/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01bb,
|
||||
},
|
||||
},
|
||||
[ C(DTLB) ] = {
|
||||
@@ -180,6 +351,39 @@ static __initconst const u64 westmere_hw_cache_event_ids
|
||||
},
|
||||
};
|
||||
|
||||
/*
 * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3
 */

#define DMND_DATA_RD	(1 << 0)
#define DMND_RFO	(1 << 1)
#define DMND_WB		(1 << 3)
#define PF_DATA_RD	(1 << 4)
#define PF_DATA_RFO	(1 << 5)
#define RESP_UNCORE_HIT	(1 << 8)
#define RESP_MISS	(0xf600)	/* non uncore hit */
|
||||
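(Aside, not part of the patch: a hedged sketch of how these bits are meant to be consumed from userspace. The raw event 0x01b7 (OFFCORE_RESPONSE_0) and the attr.config1 path into hwc->extra_config come from this series; the surrounding declaration is illustrative only.)

	/* count demand data reads that miss in the uncore (sketch) */
	struct perf_event_attr attr = {
		.size    = sizeof(attr),
		.type    = PERF_TYPE_RAW,
		.config  = 0x01b7,			/* OFFCORE_RESPONSE_0 */
		.config1 = DMND_DATA_RD | RESP_MISS,	/* written to MSR_OFFCORE_RSP_0 */
	};
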
static __initconst const u64 nehalem_hw_cache_extra_regs
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
|
||||
{
|
||||
[ C(LL ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
|
||||
[ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS,
|
||||
},
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
|
||||
[ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
|
||||
[ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
static __initconst const u64 nehalem_hw_cache_event_ids
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
@@ -215,16 +419,26 @@ static __initconst const u64 nehalem_hw_cache_event_ids
|
||||
},
|
||||
[ C(LL ) ] = {
|
||||
[ C(OP_READ) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
|
||||
[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
|
||||
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01b7,
|
||||
},
|
||||
/*
|
||||
* Use RFO, not WRITEBACK, because a write miss would typically occur
|
||||
* on RFO.
|
||||
*/
|
||||
[ C(OP_WRITE) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
|
||||
[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
|
||||
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01b7,
|
||||
},
|
||||
[ C(OP_PREFETCH) ] = {
|
||||
[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
|
||||
[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
|
||||
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
|
||||
[ C(RESULT_ACCESS) ] = 0x01b7,
|
||||
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
|
||||
[ C(RESULT_MISS) ] = 0x01b7,
|
||||
},
|
||||
},
|
||||
[ C(DTLB) ] = {
|
||||
@@ -691,8 +905,8 @@ static void intel_pmu_reset(void)
|
||||
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
|
||||
|
||||
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
|
||||
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
|
||||
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
|
||||
checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
|
||||
checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
|
||||
}
|
||||
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
|
||||
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
|
||||
@@ -793,6 +1007,67 @@ intel_bts_constraints(struct perf_event *event)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct event_constraint *
|
||||
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
|
||||
struct event_constraint *c;
|
||||
struct intel_percore *pc;
|
||||
struct er_account *era;
|
||||
int i;
|
||||
int free_slot;
|
||||
int found;
|
||||
|
||||
if (!x86_pmu.percore_constraints || hwc->extra_alloc)
|
||||
return NULL;
|
||||
|
||||
for (c = x86_pmu.percore_constraints; c->cmask; c++) {
|
||||
if (e != c->code)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Allocate resource per core.
|
||||
*/
|
||||
pc = cpuc->per_core;
|
||||
if (!pc)
|
||||
break;
|
||||
c = &emptyconstraint;
|
||||
raw_spin_lock(&pc->lock);
|
||||
free_slot = -1;
|
||||
found = 0;
|
||||
for (i = 0; i < MAX_EXTRA_REGS; i++) {
|
||||
era = &pc->regs[i];
|
||||
if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
|
||||
/* Allow sharing same config */
|
||||
if (hwc->extra_config == era->extra_config) {
|
||||
era->ref++;
|
||||
cpuc->percore_used = 1;
|
||||
hwc->extra_alloc = 1;
|
||||
c = NULL;
|
||||
}
|
||||
/* else conflict */
|
||||
found = 1;
|
||||
break;
|
||||
} else if (era->ref == 0 && free_slot == -1)
|
||||
free_slot = i;
|
||||
}
|
||||
if (!found && free_slot != -1) {
|
||||
era = &pc->regs[free_slot];
|
||||
era->ref = 1;
|
||||
era->extra_reg = hwc->extra_reg;
|
||||
era->extra_config = hwc->extra_config;
|
||||
cpuc->percore_used = 1;
|
||||
hwc->extra_alloc = 1;
|
||||
c = NULL;
|
||||
}
|
||||
raw_spin_unlock(&pc->lock);
|
||||
return c;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct event_constraint *
|
||||
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
|
||||
{
|
||||
@@ -806,9 +1081,51 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
|
||||
if (c)
|
||||
return c;
|
||||
|
||||
c = intel_percore_constraints(cpuc, event);
|
||||
if (c)
|
||||
return c;
|
||||
|
||||
return x86_get_event_constraints(cpuc, event);
|
||||
}
|
||||
|
||||
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
|
||||
struct perf_event *event)
|
||||
{
|
||||
struct extra_reg *er;
|
||||
struct intel_percore *pc;
|
||||
struct er_account *era;
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int i, allref;
|
||||
|
||||
if (!cpuc->percore_used)
return;

for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (hwc->config & er->config_mask))
continue;

pc = cpuc->per_core;
raw_spin_lock(&pc->lock);
for (i = 0; i < MAX_EXTRA_REGS; i++) {
era = &pc->regs[i];
if (era->ref > 0 &&
era->extra_config == hwc->extra_config &&
era->extra_reg == er->msr) {
era->ref--;
hwc->extra_alloc = 0;
break;
}
}
allref = 0;
for (i = 0; i < MAX_EXTRA_REGS; i++)
allref += pc->regs[i].ref;
if (allref == 0)
cpuc->percore_used = 0;
raw_spin_unlock(&pc->lock);
break;
}
}

static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -880,20 +1197,67 @@ static __initconst const struct x86_pmu core_pmu = {
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
};

static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

if (!cpu_has_ht_siblings())
return NOTIFY_OK;

cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->per_core)
return NOTIFY_BAD;

raw_spin_lock_init(&cpuc->per_core->lock);
cpuc->per_core->core_id = -1;
return NOTIFY_OK;
}

static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int core_id = topology_core_id(cpu);
int i;

init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
*/
intel_pmu_lbr_reset();

if (!cpu_has_ht_siblings())
return;

for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;

if (pc && pc->core_id == core_id) {
kfree(cpuc->per_core);
cpuc->per_core = pc;
break;
}
}

cpuc->per_core->core_id = core_id;
cpuc->per_core->refcnt++;
}

static void intel_pmu_cpu_dying(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_percore *pc = cpuc->per_core;

if (pc) {
if (pc->core_id == -1 || --pc->refcnt == 0)
kfree(pc);
cpuc->per_core = NULL;
}

fini_debug_store_on_cpu(cpu);
}

@@ -918,7 +1282,9 @@ static __initconst const struct x86_pmu intel_pmu = {
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,

.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
};
@@ -1024,6 +1390,7 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_core();

x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
break;

@@ -1032,11 +1399,16 @@ static __init int intel_pmu_init(void)
case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));

intel_pmu_lbr_init_nhm();

x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
pr_cont("Nehalem events, ");
break;

@@ -1047,6 +1419,7 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_atom();

x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
pr_cont("Atom events, ");
break;

@@ -1054,14 +1427,30 @@ static __init int intel_pmu_init(void)
case 44: /* 32 nm nehalem, "Gulftown" */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));

intel_pmu_lbr_init_nhm();

x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu.percore_constraints = intel_westmere_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
pr_cont("Westmere events, ");
break;

case 42: /* SandyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

intel_pmu_lbr_init_nhm();

x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_events;
pr_cont("SandyBridge events, ");
break;

default:
/*
* default constraints for v2 and up
@@ -361,30 +361,88 @@ static int intel_pmu_drain_bts_buffer(void)
/*
* PEBS
*/

static struct event_constraint intel_core_pebs_events[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
static struct event_constraint intel_core2_pebs_event_constraints[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_events[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */
PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
static struct event_constraint intel_atom_pebs_event_constraints[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */

INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_pebs_events[] = {
PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */
PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */
PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */
PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */
PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */
PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */
PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */
PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */
PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */
PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */
PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */
PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */
PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */
PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
EVENT_CONSTRAINT_END
};

@@ -695,20 +753,17 @@ static void intel_ds_init(void)
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
x86_pmu.pebs_constraints = intel_core_pebs_events;
break;

case 1:
printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
break;

default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
break;
}
}
}
@@ -764,9 +764,9 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
u64 v;

/* an official way for overflow indication */
rdmsrl(hwc->config_base + hwc->idx, v);
rdmsrl(hwc->config_base, v);
if (v & P4_CCCR_OVF) {
wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
return 1;
}

@@ -815,7 +815,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
(void)checking_wrmsrl(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -885,7 +885,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
p4_pmu_enable_pebs(hwc->config);

(void)checking_wrmsrl(escr_addr, escr_conf);
(void)checking_wrmsrl(hwc->config_base + hwc->idx,
(void)checking_wrmsrl(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}

@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
(void)checking_wrmsrl(hwc->config_base, val);
}

static void p6_pmu_enable_event(struct perf_event *event)
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;

(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
(void)checking_wrmsrl(hwc->config_base, val);
}

static __initconst const struct x86_pmu p6_pmu = {
@@ -46,6 +46,8 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
/* returns the bit offset of the performance counter register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (msr >= MSR_F15H_PERF_CTR)
return (msr - MSR_F15H_PERF_CTR) >> 1;
return msr - MSR_K7_PERFCTR0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
@@ -70,6 +72,8 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* returns the bit offset of the event selection register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (msr >= MSR_F15H_PERF_CTL)
return (msr - MSR_F15H_PERF_CTL) >> 1;
return msr - MSR_K7_EVNTSEL0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
441
arch/x86/kernel/devicetree.c
Normal file
@@ -0,0 +1,441 @@
/*
* Architecture specific OF callbacks.
*/
#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of_pci.h>

#include <asm/hpet.h>
#include <asm/irq_controller.h>
#include <asm/apic.h>
#include <asm/pci_x86.h>

__initdata u64 initial_dtb;
char __initdata cmd_line[COMMAND_LINE_SIZE];
static LIST_HEAD(irq_domains);
static DEFINE_RAW_SPINLOCK(big_irq_lock);

int __initdata of_ioapic;

#ifdef CONFIG_X86_IO_APIC
static void add_interrupt_host(struct irq_domain *ih)
{
unsigned long flags;

raw_spin_lock_irqsave(&big_irq_lock, flags);
list_add(&ih->l, &irq_domains);
raw_spin_unlock_irqrestore(&big_irq_lock, flags);
}
#endif

static struct irq_domain *get_ih_from_node(struct device_node *controller)
{
struct irq_domain *ih, *found = NULL;
unsigned long flags;

raw_spin_lock_irqsave(&big_irq_lock, flags);
list_for_each_entry(ih, &irq_domains, l) {
if (ih->controller == controller) {
found = ih;
break;
}
}
raw_spin_unlock_irqrestore(&big_irq_lock, flags);
return found;
}

unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
struct irq_domain *ih;
u32 virq, type;
int ret;

ih = get_ih_from_node(controller);
if (!ih)
return 0;
ret = ih->xlate(ih, intspec, intsize, &virq, &type);
if (ret)
return ret;
if (type == IRQ_TYPE_NONE)
return virq;
/* set the mask if it is different from current */
if (type == (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
set_irq_type(virq, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned long pci_address_to_pio(phys_addr_t address)
{
/*
* The ioport address can be directly used by inX / outX
*/
BUG_ON(address >= (1 << 16));
return (unsigned long)address;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
BUG();
}

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
BUG();
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}

void __init add_dtb(u64 data)
{
initial_dtb = data + offsetof(struct setup_data, data);
}

/*
* CE4100 ids. Will be moved to machine_device_initcall() once we have it.
*/
static struct of_device_id __initdata ce4100_ids[] = {
{ .compatible = "intel,ce4100-cp", },
{ .compatible = "isa", },
{ .compatible = "pci", },
{},
};

static int __init add_bus_probe(void)
{
if (!of_have_populated_dt())
return 0;

return of_platform_bus_probe(NULL, ce4100_ids, NULL);
}
module_init(add_bus_probe);

#ifdef CONFIG_PCI
static int x86_of_pci_irq_enable(struct pci_dev *dev)
{
struct of_irq oirq;
u32 virq;
int ret;
u8 pin;

ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (ret)
return ret;
if (!pin)
return 0;

ret = of_irq_map_pci(dev, &oirq);
if (ret)
return ret;

virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
if (virq == 0)
return -EINVAL;
dev->irq = virq;
return 0;
}

static void x86_of_pci_irq_disable(struct pci_dev *dev)
{
}

void __cpuinit x86_of_pci_init(void)
{
struct device_node *np;

pcibios_enable_irq = x86_of_pci_irq_enable;
pcibios_disable_irq = x86_of_pci_irq_disable;

for_each_node_by_type(np, "pci") {
const void *prop;
struct pci_bus *bus;
unsigned int bus_min;
struct device_node *child;

prop = of_get_property(np, "bus-range", NULL);
if (!prop)
continue;
bus_min = be32_to_cpup(prop);

bus = pci_find_bus(0, bus_min);
if (!bus) {
printk(KERN_ERR "Can't find a node for bus %s.\n",
np->full_name);
continue;
}

if (bus->self)
bus->self->dev.of_node = np;
else
bus->dev.of_node = np;

for_each_child_of_node(np, child) {
struct pci_dev *dev;
u32 devfn;

prop = of_get_property(child, "reg", NULL);
if (!prop)
continue;

devfn = (be32_to_cpup(prop) >> 8) & 0xff;
dev = pci_get_slot(bus, devfn);
if (!dev)
continue;
dev->dev.of_node = child;
pci_dev_put(dev);
}
}
}
#endif

static void __init dtb_setup_hpet(void)
{
#ifdef CONFIG_HPET_TIMER
struct device_node *dn;
struct resource r;
int ret;

dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-hpet");
if (!dn)
return;
ret = of_address_to_resource(dn, 0, &r);
if (ret) {
WARN_ON(1);
return;
}
hpet_address = r.start;
#endif
}

static void __init dtb_lapic_setup(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
struct device_node *dn;
struct resource r;
int ret;

dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic");
if (!dn)
return;

ret = of_address_to_resource(dn, 0, &r);
if (WARN_ON(ret))
return;

/* Did the boot loader setup the local APIC ? */
if (!cpu_has_apic) {
if (apic_force_enable(r.start))
return;
}
smp_found_config = 1;
pic_mode = 1;
register_lapic_address(r.start);
generic_processor_info(boot_cpu_physical_apicid,
GET_APIC_VERSION(apic_read(APIC_LVR)));
#endif
}

#ifdef CONFIG_X86_IO_APIC
static unsigned int ioapic_id;

static void __init dtb_add_ioapic(struct device_node *dn)
{
struct resource r;
int ret;

ret = of_address_to_resource(dn, 0, &r);
if (ret) {
printk(KERN_ERR "Can't obtain address from node %s.\n",
dn->full_name);
return;
}
mp_register_ioapic(++ioapic_id, r.start, gsi_top);
}

static void __init dtb_ioapic_setup(void)
{
struct device_node *dn;

for_each_compatible_node(dn, NULL, "intel,ce4100-ioapic")
dtb_add_ioapic(dn);

if (nr_ioapics) {
of_ioapic = 1;
return;
}
printk(KERN_ERR "Error: No information about IO-APIC in OF.\n");
}
#else
static void __init dtb_ioapic_setup(void) {}
#endif

static void __init dtb_apic_setup(void)
{
dtb_lapic_setup();
dtb_ioapic_setup();
}

#ifdef CONFIG_OF_FLATTREE
static void __init x86_flattree_get_config(void)
{
u32 size, map_len;
void *new_dtb;

if (!initial_dtb)
return;

map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK),
(u64)sizeof(struct boot_param_header));

initial_boot_params = early_memremap(initial_dtb, map_len);
size = be32_to_cpu(initial_boot_params->totalsize);
if (map_len < size) {
early_iounmap(initial_boot_params, map_len);
initial_boot_params = early_memremap(initial_dtb, size);
map_len = size;
}

new_dtb = alloc_bootmem(size);
memcpy(new_dtb, initial_boot_params, size);
early_iounmap(initial_boot_params, map_len);

initial_boot_params = new_dtb;

/* root level address cells */
of_scan_flat_dt(early_init_dt_scan_root, NULL);

unflatten_device_tree();
}
#else
static inline void x86_flattree_get_config(void) { }
#endif

void __init x86_dtb_init(void)
{
x86_flattree_get_config();

if (!of_have_populated_dt())
return;

dtb_setup_hpet();
dtb_apic_setup();
}

#ifdef CONFIG_X86_IO_APIC

struct of_ioapic_type {
u32 out_type;
u32 trigger;
u32 polarity;
};

static struct of_ioapic_type of_ioapic_type[] =
{
{
.out_type = IRQ_TYPE_EDGE_RISING,
.trigger = IOAPIC_EDGE,
.polarity = 1,
},
{
.out_type = IRQ_TYPE_LEVEL_LOW,
.trigger = IOAPIC_LEVEL,
.polarity = 0,
},
{
.out_type = IRQ_TYPE_LEVEL_HIGH,
.trigger = IOAPIC_LEVEL,
.polarity = 1,
},
{
.out_type = IRQ_TYPE_EDGE_FALLING,
.trigger = IOAPIC_EDGE,
.polarity = 0,
},
};

static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize,
u32 *out_hwirq, u32 *out_type)
{
struct io_apic_irq_attr attr;
struct of_ioapic_type *it;
u32 line, idx, type;

if (intsize < 2)
return -EINVAL;

line = *intspec;
idx = (u32) id->priv;
*out_hwirq = line + mp_gsi_routing[idx].gsi_base;

intspec++;
type = *intspec;

if (type >= ARRAY_SIZE(of_ioapic_type))
return -EINVAL;

it = of_ioapic_type + type;
*out_type = it->out_type;

set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);

return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr);
}

static void __init ioapic_add_ofnode(struct device_node *np)
{
struct resource r;
int i, ret;

ret = of_address_to_resource(np, 0, &r);
if (ret) {
printk(KERN_ERR "Failed to obtain address for %s\n",
np->full_name);
return;
}

for (i = 0; i < nr_ioapics; i++) {
if (r.start == mp_ioapics[i].apicaddr) {
struct irq_domain *id;

id = kzalloc(sizeof(*id), GFP_KERNEL);
BUG_ON(!id);
id->controller = np;
id->xlate = ioapic_xlate;
id->priv = (void *)i;
add_interrupt_host(id);
return;
}
}
printk(KERN_ERR "IOxAPIC at %s is not registered.\n", np->full_name);
}

void __init x86_add_irq_domains(void)
{
struct device_node *dp;

if (!of_have_populated_dt())
return;

for_each_node_with_property(dp, "interrupt-controller") {
if (of_device_is_compatible(dp, "intel,ce4100-ioapic"))
ioapic_add_ofnode(dp);
}
}
#else
void __init x86_add_irq_domains(void) { }
#endif

@@ -320,31 +320,6 @@ void die(const char *str, struct pt_regs *regs, long err)
|
||||
oops_end(flags, regs, sig);
|
||||
}
|
||||
|
||||
void notrace __kprobes
|
||||
die_nmi(char *str, struct pt_regs *regs, int do_panic)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
|
||||
return;
|
||||
|
||||
/*
|
||||
* We are in trouble anyway, lets at least try
|
||||
* to get a message out.
|
||||
*/
|
||||
flags = oops_begin();
|
||||
printk(KERN_EMERG "%s", str);
|
||||
printk(" on CPU%d, ip %08lx, registers:\n",
|
||||
smp_processor_id(), regs->ip);
|
||||
show_registers(regs);
|
||||
oops_end(flags, regs, 0);
|
||||
if (do_panic || panic_on_oops)
|
||||
panic("Non maskable interrupt");
|
||||
nmi_exit();
|
||||
local_irq_enable();
|
||||
do_exit(SIGBUS);
|
||||
}
|
||||
|
||||
static int __init oops_setup(char *s)
|
||||
{
|
||||
if (!s)
|
||||
|
@@ -667,21 +667,15 @@ __init void e820_setup_gap(void)
|
||||
* boot_params.e820_map, others are passed via SETUP_E820_EXT node of
|
||||
* linked list of struct setup_data, which is parsed here.
|
||||
*/
|
||||
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
|
||||
void __init parse_e820_ext(struct setup_data *sdata)
|
||||
{
|
||||
u32 map_len;
|
||||
int entries;
|
||||
struct e820entry *extmap;
|
||||
|
||||
entries = sdata->len / sizeof(struct e820entry);
|
||||
map_len = sdata->len + sizeof(struct setup_data);
|
||||
if (map_len > PAGE_SIZE)
|
||||
sdata = early_ioremap(pa_data, map_len);
|
||||
extmap = (struct e820entry *)(sdata->data);
|
||||
__append_e820_map(extmap, entries);
|
||||
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
|
||||
if (map_len > PAGE_SIZE)
|
||||
early_iounmap(sdata, map_len);
|
||||
printk(KERN_INFO "extended physical RAM map:\n");
|
||||
e820_print_map("extended");
|
||||
}
|
||||
@@ -847,15 +841,21 @@ static int __init parse_memopt(char *p)
|
||||
if (!p)
|
||||
return -EINVAL;
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
if (!strcmp(p, "nopentium")) {
|
||||
#ifdef CONFIG_X86_32
|
||||
setup_clear_cpu_cap(X86_FEATURE_PSE);
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n");
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
userdef = 1;
|
||||
mem_size = memparse(p, &p);
|
||||
/* don't remove all of memory when handling "mem={invalid}" param */
|
||||
if (mem_size == 0)
|
||||
return -EINVAL;
|
||||
e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
|
||||
|
||||
return 0;
|
||||
|
@@ -65,6 +65,8 @@
|
||||
#define sysexit_audit syscall_exit_work
|
||||
#endif
|
||||
|
||||
.section .entry.text, "ax"
|
||||
|
||||
/*
|
||||
* We use macros for low-level operations which need to be overridden
|
||||
* for paravirtualization. The following will never clobber any registers:
|
||||
@@ -395,7 +397,7 @@ sysenter_past_esp:
|
||||
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
|
||||
* pushed above; +8 corresponds to copy_thread's esp0 setting.
|
||||
*/
|
||||
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
|
||||
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
|
||||
CFI_REL_OFFSET eip, 0
|
||||
|
||||
pushl_cfi %eax
|
||||
@@ -788,7 +790,7 @@ ENDPROC(ptregs_clone)
|
||||
*/
|
||||
.section .init.rodata,"a"
|
||||
ENTRY(interrupt)
|
||||
.text
|
||||
.section .entry.text, "ax"
|
||||
.p2align 5
|
||||
.p2align CONFIG_X86_L1_CACHE_SHIFT
|
||||
ENTRY(irq_entries_start)
|
||||
@@ -807,7 +809,7 @@ vector=FIRST_EXTERNAL_VECTOR
|
||||
.endif
|
||||
.previous
|
||||
.long 1b
|
||||
.text
|
||||
.section .entry.text, "ax"
|
||||
vector=vector+1
|
||||
.endif
|
||||
.endr
|
||||
@@ -1409,8 +1411,7 @@ END(general_protection)
|
||||
#ifdef CONFIG_KVM_GUEST
|
||||
ENTRY(async_page_fault)
|
||||
RING0_EC_FRAME
|
||||
pushl $do_async_page_fault
|
||||
CFI_ADJUST_CFA_OFFSET 4
|
||||
pushl_cfi $do_async_page_fault
|
||||
jmp error_code
|
||||
CFI_ENDPROC
|
||||
END(apf_page_fault)
|
||||
|
@@ -61,6 +61,8 @@
|
||||
#define __AUDIT_ARCH_LE 0x40000000
|
||||
|
||||
.code64
|
||||
.section .entry.text, "ax"
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
ENTRY(mcount)
|
||||
@@ -744,7 +746,7 @@ END(stub_rt_sigreturn)
|
||||
*/
|
||||
.section .init.rodata,"a"
|
||||
ENTRY(interrupt)
|
||||
.text
|
||||
.section .entry.text
|
||||
.p2align 5
|
||||
.p2align CONFIG_X86_L1_CACHE_SHIFT
|
||||
ENTRY(irq_entries_start)
|
||||
@@ -763,7 +765,7 @@ vector=FIRST_EXTERNAL_VECTOR
|
||||
.endif
|
||||
.previous
|
||||
.quad 1b
|
||||
.text
|
||||
.section .entry.text
|
||||
vector=vector+1
|
||||
.endif
|
||||
.endr
|
||||
@@ -975,9 +977,12 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR \
|
||||
x86_platform_ipi smp_x86_platform_ipi
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
.irpc idx, "01234567"
|
||||
.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
|
||||
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
|
||||
.if NUM_INVALIDATE_TLB_VECTORS > \idx
|
||||
apicinterrupt (INVALIDATE_TLB_VECTOR_START)+\idx \
|
||||
invalidate_interrupt\idx smp_invalidate_interrupt
|
||||
.endif
|
||||
.endr
|
||||
#endif
|
||||
|
||||
@@ -1248,7 +1253,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
|
||||
decl PER_CPU_VAR(irq_count)
|
||||
jmp error_exit
|
||||
CFI_ENDPROC
|
||||
END(do_hypervisor_callback)
|
||||
END(xen_do_hypervisor_callback)
|
||||
|
||||
/*
|
||||
* Hypervisor uses this for application faults while it executes.
|
||||
|
@@ -437,18 +437,19 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
||||
return;
|
||||
}
|
||||
|
||||
trace.func = self_addr;
|
||||
trace.depth = current->curr_ret_stack + 1;
|
||||
|
||||
/* Only trace if the calling function expects to */
|
||||
if (!ftrace_graph_entry(&trace)) {
|
||||
*parent = old;
|
||||
return;
|
||||
}
|
||||
|
||||
if (ftrace_push_return_trace(old, self_addr, &trace.depth,
|
||||
frame_pointer) == -EBUSY) {
|
||||
*parent = old;
|
||||
return;
|
||||
}
|
||||
|
||||
trace.func = self_addr;
|
||||
|
||||
/* Only trace if the calling function expects to */
|
||||
if (!ftrace_graph_entry(&trace)) {
|
||||
current->curr_ret_stack--;
|
||||
*parent = old;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
@@ -73,7 +73,7 @@ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
|
||||
*/
|
||||
KERNEL_PAGES = LOWMEM_PAGES
|
||||
|
||||
INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
|
||||
INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
|
||||
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
|
||||
|
||||
/*
|
||||
@@ -137,7 +137,7 @@ ENTRY(startup_32)
|
||||
movsl
|
||||
1:
|
||||
|
||||
#ifdef CONFIG_OLPC_OPENFIRMWARE
|
||||
#ifdef CONFIG_OLPC
|
||||
/* save OFW's pgdir table for later use when calling into OFW */
|
||||
movl %cr3, %eax
|
||||
movl %eax, pa(olpc_ofw_pgd)
|
||||
@@ -623,7 +623,7 @@ ENTRY(initial_code)
|
||||
* BSS section
|
||||
*/
|
||||
__PAGE_ALIGNED_BSS
|
||||
.align PAGE_SIZE_asm
|
||||
.align PAGE_SIZE
|
||||
#ifdef CONFIG_X86_PAE
|
||||
initial_pg_pmd:
|
||||
.fill 1024*KPMDS,4,0
|
||||
@@ -644,7 +644,7 @@ ENTRY(swapper_pg_dir)
|
||||
#ifdef CONFIG_X86_PAE
|
||||
__PAGE_ALIGNED_DATA
|
||||
/* Page-aligned for the benefit of paravirt? */
|
||||
.align PAGE_SIZE_asm
|
||||
.align PAGE_SIZE
|
||||
ENTRY(initial_page_table)
|
||||
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
|
||||
# if KPMDS == 3
|
||||
@@ -662,7 +662,7 @@ ENTRY(initial_page_table)
|
||||
# else
|
||||
# error "Kernel PMDs should be 1, 2 or 3"
|
||||
# endif
|
||||
.align PAGE_SIZE_asm /* needs to be page-sized too */
|
||||
.align PAGE_SIZE /* needs to be page-sized too */
|
||||
#endif
|
||||
|
||||
.data
|
||||
|
@@ -503,7 +503,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
|
||||
if (!irq)
|
||||
return -EINVAL;
|
||||
|
||||
set_irq_data(irq, dev);
|
||||
irq_set_handler_data(irq, dev);
|
||||
|
||||
if (hpet_setup_msi_irq(irq))
|
||||
return -EINVAL;
|
||||
|
@@ -112,7 +112,7 @@ static void make_8259A_irq(unsigned int irq)
|
||||
{
|
||||
disable_irq_nosync(irq);
|
||||
io_apic_irqs &= ~(1<<irq);
|
||||
set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
|
||||
irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
|
||||
i8259A_chip.name);
|
||||
enable_irq(irq);
|
||||
}
|
||||
|
@@ -14,22 +14,9 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/thread_info.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <asm/syscalls.h>
|
||||
|
||||
/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
|
||||
static void set_bitmap(unsigned long *bitmap, unsigned int base,
|
||||
unsigned int extent, int new_value)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = base; i < base + extent; i++) {
|
||||
if (new_value)
|
||||
__set_bit(i, bitmap);
|
||||
else
|
||||
__clear_bit(i, bitmap);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* this changes the io permissions bitmap in the current task.
|
||||
*/
|
||||
@@ -69,7 +56,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
|
||||
*/
|
||||
tss = &per_cpu(init_tss, get_cpu());
|
||||
|
||||
set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
|
||||
if (turn_on)
|
||||
bitmap_clear(t->io_bitmap_ptr, from, num);
|
||||
else
|
||||
bitmap_set(t->io_bitmap_ptr, from, num);
|
||||
|
||||
/*
|
||||
* Search for a (possibly new) maximum. This is simple and stupid,
|
||||
|
@@ -44,9 +44,9 @@ void ack_bad_irq(unsigned int irq)
|
||||
|
||||
#define irq_stats(x) (&per_cpu(irq_stat, x))
|
||||
/*
|
||||
* /proc/interrupts printing:
|
||||
* /proc/interrupts printing for arch specific interrupts
|
||||
*/
|
||||
static int show_other_interrupts(struct seq_file *p, int prec)
|
||||
int arch_show_interrupts(struct seq_file *p, int prec)
|
||||
{
|
||||
int j;
|
||||
|
||||
@@ -122,59 +122,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
{
|
||||
unsigned long flags, any_count = 0;
|
||||
int i = *(loff_t *) v, j, prec;
|
||||
struct irqaction *action;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (i > nr_irqs)
|
||||
return 0;
|
||||
|
||||
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
|
||||
j *= 10;
|
||||
|
||||
if (i == nr_irqs)
|
||||
return show_other_interrupts(p, prec);
|
||||
|
||||
/* print header */
|
||||
if (i == 0) {
|
||||
seq_printf(p, "%*s", prec + 8, "");
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "CPU%-8d", j);
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
|
||||
desc = irq_to_desc(i);
|
||||
if (!desc)
|
||||
return 0;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
for_each_online_cpu(j)
|
||||
any_count |= kstat_irqs_cpu(i, j);
|
||||
action = desc->action;
|
||||
if (!action && !any_count)
|
||||
goto out;
|
||||
|
||||
seq_printf(p, "%*d: ", prec, i);
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
|
||||
seq_printf(p, " %8s", desc->irq_data.chip->name);
|
||||
seq_printf(p, "-%-8s", desc->name);
|
||||
|
||||
if (action) {
|
||||
seq_printf(p, " %s", action->name);
|
||||
while ((action = action->next) != NULL)
|
||||
seq_printf(p, ", %s", action->name);
|
||||
}
|
||||
|
||||
seq_putc(p, '\n');
|
||||
out:
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* /proc/stat helpers
|
||||
*/
|
||||
@@ -276,15 +223,6 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
|
||||
|
||||
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
unsigned int irq_create_of_mapping(struct device_node *controller,
|
||||
const u32 *intspec, unsigned int intsize)
|
||||
{
|
||||
return intspec[0];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
|
||||
void fixup_irqs(void)
|
||||
@@ -293,6 +231,7 @@ void fixup_irqs(void)
|
||||
static int warned;
|
||||
struct irq_desc *desc;
|
||||
struct irq_data *data;
|
||||
struct irq_chip *chip;
|
||||
|
||||
for_each_irq_desc(irq, desc) {
|
||||
int break_affinity = 0;
|
||||
@@ -307,10 +246,10 @@ void fixup_irqs(void)
|
||||
/* interrupt's are disabled at this point */
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
data = &desc->irq_data;
|
||||
data = irq_desc_get_irq_data(desc);
|
||||
affinity = data->affinity;
|
||||
if (!irq_has_action(irq) ||
|
||||
cpumask_equal(affinity, cpu_online_mask)) {
|
||||
cpumask_subset(affinity, cpu_online_mask)) {
|
||||
raw_spin_unlock(&desc->lock);
|
||||
continue;
|
||||
}
|
||||
@@ -327,16 +266,17 @@ void fixup_irqs(void)
|
||||
affinity = cpu_all_mask;
|
||||
}
|
||||
|
||||
if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
|
||||
data->chip->irq_mask(data);
|
||||
chip = irq_data_get_irq_chip(data);
|
||||
if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
|
||||
chip->irq_mask(data);
|
||||
|
||||
if (data->chip->irq_set_affinity)
|
||||
data->chip->irq_set_affinity(data, affinity, true);
|
||||
if (chip->irq_set_affinity)
|
||||
chip->irq_set_affinity(data, affinity, true);
|
||||
else if (!(warned++))
|
||||
set_affinity = 0;
|
||||
|
||||
if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
|
||||
data->chip->irq_unmask(data);
|
||||
if (!irqd_can_move_in_process_context(data) && chip->irq_unmask)
|
||||
chip->irq_unmask(data);
|
||||
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
@@ -368,10 +308,11 @@ void fixup_irqs(void)
|
||||
irq = __this_cpu_read(vector_irq[vector]);
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
data = &desc->irq_data;
|
||||
data = irq_desc_get_irq_data(desc);
|
||||
chip = irq_data_get_irq_chip(data);
|
||||
raw_spin_lock(&desc->lock);
|
||||
if (data->chip->irq_retrigger)
|
||||
data->chip->irq_retrigger(data);
|
||||
if (chip->irq_retrigger)
|
||||
chip->irq_retrigger(data);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
}
|
||||
|
@@ -25,6 +25,7 @@
|
||||
#include <asm/setup.h>
|
||||
#include <asm/i8259.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/prom.h>
|
||||
|
||||
/*
|
||||
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
|
||||
@@ -71,6 +72,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
|
||||
static struct irqaction fpu_irq = {
|
||||
.handler = math_error_irq,
|
||||
.name = "fpu",
|
||||
.flags = IRQF_NO_THREAD,
|
||||
};
|
||||
#endif
|
||||
|
||||
@@ -80,6 +82,7 @@ static struct irqaction fpu_irq = {
|
||||
static struct irqaction irq2 = {
|
||||
.handler = no_action,
|
||||
.name = "cascade",
|
||||
.flags = IRQF_NO_THREAD,
|
||||
};
|
||||
|
||||
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
|
||||
@@ -110,13 +113,19 @@ void __init init_ISA_irqs(void)
|
||||
legacy_pic->init(0);
|
||||
|
||||
for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
|
||||
set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
|
||||
irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
|
||||
}
|
||||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We probably need a better place for this, but it works for
|
||||
* now ...
|
||||
*/
|
||||
x86_add_irq_domains();
|
||||
|
||||
/*
|
||||
* On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
|
||||
* If these IRQ's are handled by legacy interrupt-controllers like PIC,
|
||||
@@ -164,14 +173,77 @@ static void __init smp_intr_init(void)
|
||||
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
|
||||
|
||||
/* IPIs for invalidation */
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
|
||||
#define ALLOC_INVTLB_VEC(NR) \
|
||||
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
|
||||
invalidate_interrupt##NR)
|
||||
|
||||
switch (NUM_INVALIDATE_TLB_VECTORS) {
|
||||
default:
|
||||
ALLOC_INVTLB_VEC(31);
|
||||
case 31:
|
||||
ALLOC_INVTLB_VEC(30);
|
||||
case 30:
|
||||
ALLOC_INVTLB_VEC(29);
|
||||
case 29:
|
||||
ALLOC_INVTLB_VEC(28);
|
||||
case 28:
|
||||
ALLOC_INVTLB_VEC(27);
|
||||
case 27:
|
||||
ALLOC_INVTLB_VEC(26);
|
||||
case 26:
|
||||
ALLOC_INVTLB_VEC(25);
|
||||
case 25:
|
||||
ALLOC_INVTLB_VEC(24);
|
||||
case 24:
|
||||
ALLOC_INVTLB_VEC(23);
|
||||
case 23:
|
||||
ALLOC_INVTLB_VEC(22);
|
||||
case 22:
|
||||
ALLOC_INVTLB_VEC(21);
|
||||
case 21:
|
||||
ALLOC_INVTLB_VEC(20);
|
||||
case 20:
|
||||
ALLOC_INVTLB_VEC(19);
|
||||
case 19:
|
||||
ALLOC_INVTLB_VEC(18);
|
||||
case 18:
|
||||
ALLOC_INVTLB_VEC(17);
|
||||
case 17:
|
||||
ALLOC_INVTLB_VEC(16);
|
||||
case 16:
|
||||
ALLOC_INVTLB_VEC(15);
|
||||
case 15:
|
||||
ALLOC_INVTLB_VEC(14);
|
||||
case 14:
|
||||
ALLOC_INVTLB_VEC(13);
|
||||
case 13:
|
||||
ALLOC_INVTLB_VEC(12);
|
||||
case 12:
|
||||
ALLOC_INVTLB_VEC(11);
|
||||
case 11:
|
||||
ALLOC_INVTLB_VEC(10);
|
||||
case 10:
|
||||
ALLOC_INVTLB_VEC(9);
|
||||
case 9:
|
||||
ALLOC_INVTLB_VEC(8);
|
||||
case 8:
|
||||
ALLOC_INVTLB_VEC(7);
|
||||
case 7:
|
||||
ALLOC_INVTLB_VEC(6);
|
||||
case 6:
|
||||
ALLOC_INVTLB_VEC(5);
|
||||
case 5:
|
||||
ALLOC_INVTLB_VEC(4);
|
||||
case 4:
|
||||
ALLOC_INVTLB_VEC(3);
|
||||
case 3:
|
||||
ALLOC_INVTLB_VEC(2);
|
||||
case 2:
|
||||
ALLOC_INVTLB_VEC(1);
|
||||
case 1:
|
||||
ALLOC_INVTLB_VEC(0);
|
||||
break;
|
||||
}
|
||||
|
||||
/* IPI for generic function call */
|
||||
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
|
||||
@@ -243,7 +315,7 @@ void __init native_init_IRQ(void)
|
||||
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
|
||||
}
|
||||
|
||||
if (!acpi_ioapic)
|
||||
if (!acpi_ioapic && !of_ioapic)
|
||||
setup_irq(2, &irq2);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
@@ -533,15 +533,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
|
||||
}
|
||||
return NOTIFY_DONE;
|
||||
|
||||
case DIE_NMIWATCHDOG:
|
||||
if (atomic_read(&kgdb_active) != -1) {
|
||||
/* KGDB CPU roundup: */
|
||||
kgdb_nmicallback(raw_smp_processor_id(), regs);
|
||||
return NOTIFY_STOP;
|
||||
}
|
||||
/* Enter debugger: */
|
||||
break;
|
||||
|
||||
case DIE_DEBUG:
|
||||
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
|
||||
if (user_mode(regs))
|
||||
|
@@ -1276,6 +1276,14 @@ static int __kprobes can_optimize(unsigned long paddr)
|
||||
if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Do not optimize in the entry code due to the unstable
|
||||
* stack handling.
|
||||
*/
|
||||
if ((paddr >= (unsigned long )__entry_text_start) &&
|
||||
(paddr < (unsigned long )__entry_text_end))
|
||||
return 0;
|
||||
|
||||
/* Check there is enough space for a relative jump. */
|
||||
if (size - offset < RELATIVEJUMP_SIZE)
|
||||
return 0;
|
||||
|
@@ -66,7 +66,6 @@ struct microcode_amd {
|
||||
unsigned int mpb[0];
|
||||
};
|
||||
|
||||
#define UCODE_MAX_SIZE 2048
|
||||
#define UCODE_CONTAINER_SECTION_HDR 8
|
||||
#define UCODE_CONTAINER_HEADER_SIZE 12
|
||||
|
||||
@@ -77,20 +76,20 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
u32 dummy;
|
||||
|
||||
memset(csig, 0, sizeof(*csig));
|
||||
if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
|
||||
pr_warning("microcode: CPU%d: AMD CPU family 0x%x not "
|
||||
"supported\n", cpu, c->x86);
|
||||
pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
|
||||
return -1;
|
||||
}
|
||||
|
||||
rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy);
|
||||
pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev);
|
||||
pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_matching_microcode(int cpu, void *mc, int rev)
|
||||
static int get_matching_microcode(int cpu, struct microcode_header_amd *mc_hdr,
|
||||
int rev)
|
||||
{
|
||||
struct microcode_header_amd *mc_header = mc;
|
||||
unsigned int current_cpu_id;
|
||||
u16 equiv_cpu_id = 0;
|
||||
unsigned int i = 0;
|
||||
@@ -109,17 +108,17 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
|
||||
if (!equiv_cpu_id)
|
||||
return 0;
|
||||
|
||||
if (mc_header->processor_rev_id != equiv_cpu_id)
|
||||
if (mc_hdr->processor_rev_id != equiv_cpu_id)
|
||||
return 0;
|
||||
|
||||
/* ucode might be chipset specific -- currently we don't support this */
|
||||
if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
|
||||
pr_err("CPU%d: loading of chipset specific code not yet supported\n",
|
||||
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
|
||||
pr_err("CPU%d: chipset specific code not yet supported\n",
|
||||
cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (mc_header->patch_id <= rev)
|
||||
if (mc_hdr->patch_id <= rev)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
@@ -144,71 +143,93 @@ static int apply_microcode_amd(int cpu)
|
||||
|
||||
/* check current patch id and patch's id for match */
|
||||
if (rev != mc_amd->hdr.patch_id) {
|
||||
pr_err("CPU%d: update failed (for patch_level=0x%x)\n",
|
||||
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
|
||||
cpu, mc_amd->hdr.patch_id);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev);
|
||||
pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
|
||||
uci->cpu_sig.rev = rev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *
|
||||
get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
|
||||
static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
|
||||
{
|
||||
unsigned int total_size;
|
||||
u8 section_hdr[UCODE_CONTAINER_SECTION_HDR];
|
||||
void *mc;
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
unsigned int max_size, actual_size;
|
||||
|
||||
get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR);
|
||||
#define F1XH_MPB_MAX_SIZE 2048
|
||||
#define F14H_MPB_MAX_SIZE 1824
|
||||
#define F15H_MPB_MAX_SIZE 4096
|
||||
|
||||
if (section_hdr[0] != UCODE_UCODE_TYPE) {
|
||||
pr_err("error: invalid type field in container file section header\n");
|
||||
return NULL;
|
||||
switch (c->x86) {
|
||||
case 0x14:
|
||||
max_size = F14H_MPB_MAX_SIZE;
|
||||
break;
|
||||
case 0x15:
|
||||
max_size = F15H_MPB_MAX_SIZE;
|
||||
break;
|
||||
default:
|
||||
max_size = F1XH_MPB_MAX_SIZE;
|
||||
break;
|
||||
}
|
||||
|
||||
total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
|
||||
actual_size = buf[4] + (buf[5] << 8);
|
||||
|
||||
if (total_size > size || total_size > UCODE_MAX_SIZE) {
|
||||
pr_err("error: size mismatch\n");
|
||||
return NULL;
|
||||
if (actual_size > size || actual_size > max_size) {
|
||||
pr_err("section size mismatch\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
mc = vzalloc(UCODE_MAX_SIZE);
|
||||
return actual_size;
|
||||
}
|
||||
|
||||
static struct microcode_header_amd *
|
||||
get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size)
|
||||
{
|
||||
struct microcode_header_amd *mc = NULL;
|
||||
unsigned int actual_size = 0;
|
||||
|
||||
if (buf[0] != UCODE_UCODE_TYPE) {
|
||||
pr_err("invalid type field in container file section header\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
actual_size = verify_ucode_size(cpu, buf, size);
|
||||
if (!actual_size)
|
||||
goto out;
|
||||
|
||||
mc = vzalloc(actual_size);
|
||||
if (!mc)
|
||||
return NULL;
|
||||
goto out;
|
||||
|
||||
get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, total_size);
|
||||
*mc_size = total_size + UCODE_CONTAINER_SECTION_HDR;
|
||||
get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size);
|
||||
*mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR;
|
||||
|
||||
out:
|
||||
return mc;
|
||||
}
|
||||
|
||||
static int install_equiv_cpu_table(const u8 *buf)
|
||||
{
|
||||
u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE];
|
||||
unsigned int *buf_pos = (unsigned int *)container_hdr;
|
||||
unsigned long size;
|
||||
unsigned int *ibuf = (unsigned int *)buf;
|
||||
unsigned int type = ibuf[1];
|
||||
unsigned int size = ibuf[2];
|
||||
|
||||
get_ucode_data(&container_hdr, buf, UCODE_CONTAINER_HEADER_SIZE);
|
||||
|
||||
size = buf_pos[2];
|
||||
|
||||
if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
|
||||
pr_err("error: invalid type field in container file section header\n");
|
||||
return 0;
|
||||
if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
|
||||
pr_err("empty section/"
|
||||
"invalid type field in container file section header\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
equiv_cpu_table = vmalloc(size);
|
||||
if (!equiv_cpu_table) {
|
||||
pr_err("failed to allocate equivalent CPU table\n");
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
buf += UCODE_CONTAINER_HEADER_SIZE;
|
||||
get_ucode_data(equiv_cpu_table, buf, size);
|
||||
get_ucode_data(equiv_cpu_table, buf + UCODE_CONTAINER_HEADER_SIZE, size);
|
||||
|
||||
return size + UCODE_CONTAINER_HEADER_SIZE; /* add header length */
|
||||
}
|
||||
@@ -223,16 +244,16 @@ static enum ucode_state
|
||||
generic_load_microcode(int cpu, const u8 *data, size_t size)
|
||||
{
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
||||
struct microcode_header_amd *mc_hdr = NULL;
|
||||
unsigned int mc_size, leftover;
|
||||
int offset;
|
||||
const u8 *ucode_ptr = data;
|
||||
void *new_mc = NULL;
|
||||
void *mc;
|
||||
int new_rev = uci->cpu_sig.rev;
|
||||
unsigned int leftover;
|
||||
unsigned long offset;
|
||||
unsigned int new_rev = uci->cpu_sig.rev;
|
||||
enum ucode_state state = UCODE_OK;
|
||||
|
||||
offset = install_equiv_cpu_table(ucode_ptr);
|
||||
if (!offset) {
|
||||
if (offset < 0) {
|
||||
pr_err("failed to create equivalent cpu table\n");
|
||||
return UCODE_ERROR;
|
||||
}
|
||||
@@ -241,64 +262,65 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
|
||||
leftover = size - offset;
|
||||
|
||||
while (leftover) {
|
||||
unsigned int uninitialized_var(mc_size);
|
||||
struct microcode_header_amd *mc_header;
|
||||
|
||||
mc = get_next_ucode(ucode_ptr, leftover, &mc_size);
|
||||
if (!mc)
|
||||
mc_hdr = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size);
|
||||
if (!mc_hdr)
|
||||
break;
|
||||
|
||||
mc_header = (struct microcode_header_amd *)mc;
|
||||
if (get_matching_microcode(cpu, mc, new_rev)) {
|
||||
if (get_matching_microcode(cpu, mc_hdr, new_rev)) {
|
||||
vfree(new_mc);
|
||||
new_rev = mc_header->patch_id;
|
||||
new_mc = mc;
|
||||
new_rev = mc_hdr->patch_id;
|
||||
new_mc = mc_hdr;
|
||||
} else
|
||||
vfree(mc);
|
||||
vfree(mc_hdr);
|
||||
|
||||
ucode_ptr += mc_size;
|
||||
leftover -= mc_size;
|
||||
}
|
||||
|
||||
if (new_mc) {
|
||||
if (!leftover) {
|
||||
vfree(uci->mc);
|
||||
uci->mc = new_mc;
|
||||
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
|
||||
cpu, new_rev, uci->cpu_sig.rev);
|
||||
} else {
|
||||
vfree(new_mc);
|
||||
state = UCODE_ERROR;
|
||||
}
|
||||
} else
|
||||
if (!new_mc) {
|
||||
state = UCODE_NFOUND;
|
||||
goto free_table;
|
||||
}
|
||||
|
||||
if (!leftover) {
|
||||
vfree(uci->mc);
|
||||
uci->mc = new_mc;
|
||||
pr_debug("CPU%d update ucode (0x%08x -> 0x%08x)\n",
|
||||
cpu, uci->cpu_sig.rev, new_rev);
|
||||
} else {
|
||||
vfree(new_mc);
|
||||
state = UCODE_ERROR;
|
||||
}
|
||||
|
||||
free_table:
|
||||
free_equiv_cpu_table();
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
static enum ucode_state request_microcode_fw(int cpu, struct device *device)
|
||||
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
|
||||
{
|
||||
const char *fw_name = "amd-ucode/microcode_amd.bin";
|
||||
const struct firmware *firmware;
|
||||
enum ucode_state ret;
|
||||
const struct firmware *fw;
|
||||
enum ucode_state ret = UCODE_NFOUND;
|
||||
|
||||
if (request_firmware(&firmware, fw_name, device)) {
|
||||
printk(KERN_ERR "microcode: failed to load file %s\n", fw_name);
|
||||
return UCODE_NFOUND;
|
||||
if (request_firmware(&fw, fw_name, device)) {
|
||||
pr_err("failed to load file %s\n", fw_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (*(u32 *)firmware->data != UCODE_MAGIC) {
|
||||
pr_err("invalid UCODE_MAGIC (0x%08x)\n",
|
||||
*(u32 *)firmware->data);
|
||||
return UCODE_ERROR;
|
||||
ret = UCODE_ERROR;
|
||||
if (*(u32 *)fw->data != UCODE_MAGIC) {
|
||||
pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
|
||||
goto fw_release;
|
||||
}
|
||||
|
||||
ret = generic_load_microcode(cpu, firmware->data, firmware->size);
|
||||
ret = generic_load_microcode(cpu, fw->data, fw->size);
|
||||
|
||||
release_firmware(firmware);
|
||||
fw_release:
|
||||
release_firmware(fw);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -319,7 +341,7 @@ static void microcode_fini_cpu_amd(int cpu)
|
||||
|
||||
static struct microcode_ops microcode_amd_ops = {
|
||||
.request_microcode_user = request_microcode_user,
|
||||
.request_microcode_fw = request_microcode_fw,
|
||||
.request_microcode_fw = request_microcode_amd,
|
||||
.collect_cpu_info = collect_cpu_info_amd,
|
||||
.apply_microcode = apply_microcode_amd,
|
||||
.microcode_fini_cpu = microcode_fini_cpu_amd,
|
||||
|
@@ -417,8 +417,10 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (microcode_init_cpu(cpu) == UCODE_ERROR)
|
||||
err = -EINVAL;
|
||||
if (microcode_init_cpu(cpu) == UCODE_ERROR) {
|
||||
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@@ -110,12 +110,9 @@ void show_regs_common(void)
|
||||
init_utsname()->release,
|
||||
(int)strcspn(init_utsname()->version, " "),
|
||||
init_utsname()->version);
|
||||
printk(KERN_CONT " ");
|
||||
printk(KERN_CONT "%s %s", vendor, product);
|
||||
if (board) {
|
||||
printk(KERN_CONT "/");
|
||||
printk(KERN_CONT "%s", board);
|
||||
}
|
||||
printk(KERN_CONT " %s %s", vendor, product);
|
||||
if (board)
|
||||
printk(KERN_CONT "/%s", board);
|
||||
printk(KERN_CONT "\n");
|
||||
}
|
||||
|
||||
|
@@ -6,6 +6,7 @@
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/bcd.h>
|
||||
#include <linux/pnp.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <asm/vsyscall.h>
|
||||
#include <asm/x86_init.h>
|
||||
@@ -236,6 +237,8 @@ static __init int add_rtc_cmos(void)
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (of_have_populated_dt())
|
||||
return 0;
|
||||
|
||||
platform_device_register(&rtc_device);
|
||||
dev_info(&rtc_device.dev,
|
||||
|
@@ -113,6 +113,7 @@
|
||||
#endif
|
||||
#include <asm/mce.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/prom.h>
|
||||
|
||||
/*
|
||||
* end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
|
||||
@@ -293,10 +294,32 @@ static void __init init_gbpages(void)
|
||||
else
|
||||
direct_gbpages = 0;
|
||||
}
|
||||
|
||||
static void __init cleanup_highmap_brk_end(void)
|
||||
{
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
|
||||
mmu_cr4_features = read_cr4();
|
||||
|
||||
/*
|
||||
* _brk_end cannot change anymore, but it and _end may be
|
||||
* located on different 2M pages. cleanup_highmap(), however,
|
||||
* can only consider _end when it runs, so destroy any
|
||||
* mappings beyond _brk_end here.
|
||||
*/
|
||||
pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
|
||||
pmd = pmd_offset(pud, _brk_end - 1);
|
||||
while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
|
||||
pmd_clear(pmd);
|
||||
}
|
||||
#else
|
||||
static inline void init_gbpages(void)
|
||||
{
|
||||
}
|
||||
static inline void cleanup_highmap_brk_end(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __init reserve_brk(void)
|
||||
@@ -307,6 +330,8 @@ static void __init reserve_brk(void)
|
||||
/* Mark brk area as locked down and no longer taking any
|
||||
new allocations */
|
||||
_brk_start = 0;
|
||||
|
||||
cleanup_highmap_brk_end();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
@@ -429,16 +454,30 @@ static void __init parse_setup_data(void)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, PAGE_SIZE);
		u32 data_len, map_len;

		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			(u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		if (data_len > map_len) {
			early_iounmap(data, map_len);
			data = early_memremap(pa_data, data_len);
			map_len = data_len;
		}

		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			parse_e820_ext(data);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		default:
			break;
		}
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
		early_iounmap(data, map_len);
	}
}

@@ -680,15 +719,6 @@ static int __init parse_reservelow(char *p)

early_param("reservelow", parse_reservelow);

static u64 __init get_max_mapped(void)
{
	u64 end = max_pfn_mapped;

	end <<= PAGE_SHIFT;

	return end;
}

/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
@@ -704,8 +734,6 @@ static u64 __init get_max_mapped(void)

void __init setup_arch(char **cmdline_p)
{
	int acpi = 0;
	int amd = 0;
	unsigned long flags;

#ifdef CONFIG_X86_32
@@ -984,19 +1012,7 @@ void __init setup_arch(char **cmdline_p)

	early_acpi_boot_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi = acpi_numa_init();
#endif

#ifdef CONFIG_AMD_NUMA
	if (!acpi)
		amd = !amd_numa_init(0, max_pfn);
#endif

	initmem_init(0, max_pfn, acpi, amd);
	initmem_init();
	memblock_find_dma_reserve();
	dma32_reserve_bootmem();

@@ -1029,8 +1045,8 @@ void __init setup_arch(char **cmdline_p)
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();

	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
@@ -1040,9 +1056,7 @@ void __init setup_arch(char **cmdline_p)

	prefill_possible_map();

#ifdef CONFIG_X86_64
	init_cpu_to_node();
#endif

	init_apic_mappings();
	ioapic_and_gsi_init();
@@ -1066,6 +1080,8 @@ void __init setup_arch(char **cmdline_p)
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	local_irq_save(flags);

@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void)
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
@@ -241,7 +246,6 @@ void __init setup_per_cpu_areas(void)
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
#endif
	/*
	 * Up to this point, the boot CPU has been using .init.data
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void)
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

@@ -64,6 +64,7 @@
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
@@ -71,10 +72,6 @@
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>

#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
#endif

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

@@ -130,68 +127,14 @@ EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

atomic_t init_deasserted;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})
#endif

#ifdef CONFIG_X86_32
static int boot_cpu_logical_apicid;

u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = BAD_APICID };

static void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apic->apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void numa_remove_cpu(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid() do {} while (0)
#endif

/*
 * Report back to the Boot Processor.
 * Running on AP.
@@ -259,7 +202,6 @@ static void __cpuinit smp_callin(void)
	apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Need to setup vector mappings before we enable interrupts.
@@ -355,23 +297,6 @@ notrace static void __cpuinit start_secondary(void *unused)
	cpu_idle();
}

#ifdef CONFIG_CPUMASK_OFFSTACK
/* In this case, llc_shared_map is a pointer to a cpumask. */
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
				    const struct cpuinfo_x86 *src)
{
	struct cpumask *llc = dst->llc_shared_map;
	*dst = *src;
	dst->llc_shared_map = llc;
}
#else
static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
				    const struct cpuinfo_x86 *src)
{
	*dst = *src;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
@@ -381,7 +306,7 @@ void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	copy_cpuinfo_x86(c, &boot_cpu_data);
	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
@@ -389,15 +314,12 @@ void __cpuinit smp_store_cpu_info(int id)

static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
{
	struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
	struct cpuinfo_x86 *c2 = &cpu_data(cpu2);

	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
	cpumask_set_cpu(cpu1, c2->llc_shared_map);
	cpumask_set_cpu(cpu2, c1->llc_shared_map);
	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
}

@@ -426,7 +348,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	}

	cpumask_set_cpu(cpu, c->llc_shared_map);
	cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));

	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -437,8 +359,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
	for_each_cpu(i, cpu_sibling_setup_mask) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpumask_set_cpu(i, c->llc_shared_map);
			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -477,7 +399,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
	    !(cpu_has(c, X86_FEATURE_AMD_DCM)))
		return cpu_core_mask(cpu);
	else
		return c->llc_shared_map;
		return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
@@ -946,6 +868,14 @@ int __cpuinit native_cpu_up(unsigned int cpu)
	return 0;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
@@ -961,7 +891,6 @@ static __init void disable_smp(void)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	map_cpu_to_logical_apicid();
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}
@@ -1046,7 +975,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
			"(tell your hw vendor)\n");
		}
		smpboot_clear_io_apic();
		arch_disable_smp_support();
		disable_ioapic_support();
		return -1;
	}

@@ -1090,21 +1019,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

	preempt_disable();
	smp_cpu_index_default();
	memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
#ifdef CONFIG_X86_32
	boot_cpu_logical_apicid = logical_smp_processor_id();
#endif
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	current_thread_info()->cpu = 0; /* needed? */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

@@ -1140,8 +1067,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

	bsp_end_local_APIC_setup();

	map_cpu_to_logical_apicid();

	if (apic->setup_portio_remap)
		apic->setup_portio_remap();

@@ -340,3 +340,6 @@ ENTRY(sys_call_table)
	.long sys_fanotify_init
	.long sys_fanotify_mark
	.long sys_prlimit64 /* 340 */
	.long sys_name_to_handle_at
	.long sys_open_by_handle_at
	.long sys_clock_adjtime

@@ -105,6 +105,7 @@ SECTIONS
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
@@ -305,7 +306,7 @@ SECTIONS
	}

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(THREAD_SIZE)
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

@@ -52,6 +52,7 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(memmove);

EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT

@@ -70,6 +70,7 @@ struct x86_init_ops x86_init __initdata = {
		.setup_percpu_clockev = setup_boot_APIC_clock,
		.tsc_pre_init = x86_init_noop,
		.timer_init = hpet_time_init,
		.wallclock_init = x86_init_noop,
	},

	.iommu = {