Merge branch 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (76 commits)
  x86, apic: Fix dummy apic read operation together with broken MP handling
  x86, apic: Restore irqs on fail paths
  x86: Print real IOAPIC version for x86-64
  x86: enable_update_mptable should be a macro
  sparseirq: Allow early irq_desc allocation
  x86, io-apic: Don't mark pin_programmed early
  x86, irq: don't call mp_config_acpi_gsi() if update_mptable is not enabled
  x86, irq: update_mptable needs pci_routeirq
  x86: don't call read_apic_id if !cpu_has_apic
  x86, apic: introduce io_apic_irq_attr
  x86/pci: add 4 more return parameters to IO_APIC_get_PCI_irq_vector(), fix
  x86: read apic ID in the !acpi_lapic case
  x86: apic: Fixmap apic address even if apic disabled
  x86: display extended apic registers with print_local_APIC and cpu_debug code
  x86: read apic ID in the !acpi_lapic case
  x86: clean up and fix setup_clear/force_cpu_cap handling
  x86: apic: Check rev 3 fadt correctly for physical_apic bit
  x86/pci: update pirq_enable_irq() to setup io apic routing
  x86/acpi: move setup io apic routing out of CONFIG_ACPI scope
  x86/pci: add 4 more return parameters to IO_APIC_get_PCI_irq_vector()
  ...
@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y			+= setup.o i8259.o irqinit_$(BITS).o
+obj-y			+= setup.o i8259.o irqinit.o
 obj-$(CONFIG_X86_VISWS)	+= visws_quirks.o
 obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/bootmem.h>
 #include <linux/ioport.h>
+#include <linux/pci.h>

 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
@@ -522,7 +523,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 {
 	unsigned int irq;
 	unsigned int plat_gsi = gsi;

@@ -532,14 +533,14 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
 	 */
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-		if (triggering == ACPI_LEVEL_SENSITIVE)
+		if (trigger == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
 #endif

 #ifdef CONFIG_X86_IO_APIC
 	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
+		plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
 	}
 #endif
 	acpi_gsi_to_irq(plat_gsi, &irq);
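For context (not part of this diff): with the new signature, a caller routing a PCI interrupt can hand the device through, so the mptable code further down can recover bus/devfn/pin from it. A minimal hypothetical caller, assuming a struct pci_dev *pdev:

	int irq = acpi_register_gsi(&pdev->dev, gsi,
				    ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	/* dev may also be NULL; mp_config_acpi_gsi() below then simply
	 * skips the mptable update. */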
@@ -903,10 +904,8 @@ extern int es7000_plat;
 #endif

 static struct {
-	int apic_id;
 	int gsi_base;
 	int gsi_end;
-	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];

 int mp_find_ioapic(int gsi)

@@ -986,16 +985,12 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)

 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
 	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
-#ifdef CONFIG_X86_32
 	mp_ioapics[idx].apicver = io_apic_get_version(idx);
-#else
-	mp_ioapics[idx].apicver = 0;
-#endif

 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 		io_apic_get_redir_entries(idx);
@@ -1158,26 +1153,52 @@ void __init mp_config_acpi_legacy_irqs(void)
 		}
 	}

-int mp_register_gsi(u32 gsi, int triggering, int polarity)
+static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
+			      int polarity)
 {
+#ifdef CONFIG_X86_MPPARSE
+	struct mpc_intsrc mp_irq;
+	struct pci_dev *pdev;
+	unsigned char number;
+	unsigned int devfn;
 	int ioapic;
+	u8 pin;
+
+	if (!acpi_ioapic)
+		return 0;
+	if (!dev)
+		return 0;
+	if (dev->bus != &pci_bus_type)
+		return 0;
+
+	pdev = to_pci_dev(dev);
+	number = pdev->bus->number;
+	devfn = pdev->devfn;
+	pin = pdev->pin;
+	/* print the entry should happen on mptable identically */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+			 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	ioapic = mp_find_ioapic(gsi);
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid;
+	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
+
+	save_mp_irq(&mp_irq);
+#endif
+	return 0;
+}
+
+int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+	int ioapic;
 	int ioapic_pin;
-#ifdef CONFIG_X86_32
-#define MAX_GSI_NUM	4096
-#define IRQ_COMPRESSION_START	64
-
-	static int pci_irq = IRQ_COMPRESSION_START;
-	/*
-	 * Mapping between Global System Interrupts, which
-	 * represent all possible interrupts, and IRQs
-	 * assigned to actual devices.
-	 */
-	static int gsi_to_irq[MAX_GSI_NUM];
-#else
+	struct io_apic_irq_attr irq_attr;

 	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
 		return gsi;
-#endif

 	/* Don't set up the ACPI SCI because it's already set up */
 	if (acpi_gbl_FADT.sci_interrupt == gsi)
@@ -1196,95 +1217,24 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 		gsi = ioapic_renumber_irq(ioapic, gsi);
 #endif

-	/*
-	 * Avoid pin reprogramming.  PRTs typically include entries
-	 * with redundant pin->gsi mappings (but unique PCI devices);
-	 * we only program the IOAPIC on the first.
-	 */
 	if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
 		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-		       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
+		       "%d-%d\n", mp_ioapics[ioapic].apicid,
 		       ioapic_pin);
 		return gsi;
 	}
-	if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
-		pr_debug("Pin %d-%d already programmed\n",
-			 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-#ifdef CONFIG_X86_32
-		return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
-#else
-		return gsi;
-#endif
-	}
-
-	set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
-#ifdef CONFIG_X86_32
-	/*
-	 * For GSI >= 64, use IRQ compression
-	 */
-	if ((gsi >= IRQ_COMPRESSION_START)
-	    && (triggering == ACPI_LEVEL_SENSITIVE)) {
-		/*
-		 * For PCI devices assign IRQs in order, avoiding gaps
-		 * due to unused I/O APIC pins.
-		 */
-		int irq = gsi;
-		if (gsi < MAX_GSI_NUM) {
-			/*
-			 * Retain the VIA chipset work-around (gsi > 15), but
-			 * avoid a problem where the 8254 timer (IRQ0) is setup
-			 * via an override (so it's not on pin 0 of the ioapic),
-			 * and at the same time, the pin 0 interrupt is a PCI
-			 * type.  The gsi > 15 test could cause these two pins
-			 * to be shared as IRQ0, and they are not shareable.
-			 * So test for this condition, and if necessary, avoid
-			 * the pin collision.
-			 */
-			gsi = pci_irq++;
-			/*
-			 * Don't assign IRQ used by ACPI SCI
-			 */
-			if (gsi == acpi_gbl_FADT.sci_interrupt)
-				gsi = pci_irq++;
-			gsi_to_irq[irq] = gsi;
-		} else {
-			printk(KERN_ERR "GSI %u is too high\n", gsi);
-			return gsi;
-		}
-	}
-#endif
-	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-				triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
-				polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+	if (enable_update_mptable)
+		mp_config_acpi_gsi(dev, gsi, trigger, polarity);
+
+	set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
+			     trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
+			     polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+	io_apic_set_pci_routing(dev, gsi, &irq_attr);

 	return gsi;
 }
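The io_apic_irq_attr type used above is introduced elsewhere in this series, in a header that is not part of this diff. Roughly, it just bundles the four routing parameters — a sketch:

	struct io_apic_irq_attr {
		int ioapic;
		int ioapic_pin;
		int trigger;	/* 0: edge, 1: level */
		int polarity;	/* 0: high, 1: low */
	};

so that io_apic_set_pci_routing() can take one attribute argument plus the device instead of five scalars.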
-int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
-			u32 gsi, int triggering, int polarity)
-{
-#ifdef CONFIG_X86_MPPARSE
-	struct mpc_intsrc mp_irq;
-	int ioapic;
-
-	if (!acpi_ioapic)
-		return 0;
-
-	/* print the entry should happen on mptable identically */
-	mp_irq.type = MP_INTSRC;
-	mp_irq.irqtype = mp_INT;
-	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
-			 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.srcbus = number;
-	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
-	ioapic = mp_find_ioapic(gsi);
-	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
-
-	save_mp_irq(&mp_irq);
-#endif
-	return 0;
-}

 /*
  * Parse IOAPIC related entries in MADT
  * returns 0 on success, < 0 on error
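A worked example of the mptable encoding used in both versions of mp_config_acpi_gsi() above: for PCI device 3, pin INTB (pin == 2), devfn is (3 << 3) | 0 = 0x18, so

	srcbusirq = (((0x18 >> 3) & 0x1f) << 2) | ((2 - 1) & 3)
		  = (3 << 2) | 1 = 0x0d;	/* device in bits 6:2, pin-1 in bits 1:0 */

and a level-triggered, active-low interrupt (the usual PCI case) gets irqflag = 0x0c | 3 = 0x0f, matching the MP specification's two-bit trigger and polarity fields.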
@@ -98,6 +98,29 @@ early_param("lapic", parse_lapic);
 /* Local APIC was disabled by the BIOS and enabled by the kernel */
 static int enabled_via_apicbase;

+/*
+ * Handle interrupt mode configuration register (IMCR).
+ * This register controls whether the interrupt signals
+ * that reach the BSP come from the master PIC or from the
+ * local APIC. Before entering Symmetric I/O Mode, either
+ * the BIOS or the operating system must switch out of
+ * PIC Mode by changing the IMCR.
+ */
+static inline void imcr_pic_to_apic(void)
+{
+	/* select IMCR register */
+	outb(0x70, 0x22);
+	/* NMI and 8259 INTR go through APIC */
+	outb(0x01, 0x23);
+}
+
+static inline void imcr_apic_to_pic(void)
+{
+	/* select IMCR register */
+	outb(0x70, 0x22);
+	/* NMI and 8259 INTR go directly to BSP */
+	outb(0x00, 0x23);
+}
 #endif

 #ifdef CONFIG_X86_64
@@ -111,13 +134,19 @@ static __init int setup_apicpmtimer(char *s)
 __setup("apicpmtimer", setup_apicpmtimer);
 #endif

+int x2apic_mode;
 #ifdef CONFIG_X86_X2APIC
-int x2apic;
 /* x2apic enabled before OS handover */
 static int x2apic_preenabled;
 static int disable_x2apic;
 static __init int setup_nox2apic(char *str)
 {
 	if (x2apic_enabled()) {
 		pr_warning("Bios already enabled x2apic, "
 			   "can't enforce nox2apic");
 		return 0;
 	}

 	disable_x2apic = 1;
 	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
 	return 0;
@@ -209,6 +238,31 @@ static int modern_apic(void)
 	return lapic_get_version() >= 0x14;
 }

+/*
+ * bare function to substitute write operation
+ * and it's _that_ fast :)
+ */
+static void native_apic_write_dummy(u32 reg, u32 v)
+{
+	WARN_ON_ONCE((cpu_has_apic || !disable_apic));
+}
+
+static u32 native_apic_read_dummy(u32 reg)
+{
+	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
+	return 0;
+}
+
+/*
+ * right after this call apic->write/read doesn't do anything
+ * note that there is no restore operation it works one way
+ */
+void apic_disable(void)
+{
+	apic->read = native_apic_read_dummy;
+	apic->write = native_apic_write_dummy;
+}

 void native_apic_wait_icr_idle(void)
 {
 	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
@@ -348,7 +402,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)

 static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
 {
-	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
+	unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
 	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

 	apic_write(reg, v);
@@ -815,7 +869,7 @@ void clear_local_APIC(void)
 	u32 v;

 	/* APIC hasn't been mapped yet */
-	if (!x2apic && !apic_phys)
+	if (!x2apic_mode && !apic_phys)
 		return;

 	maxlvt = lapic_get_maxlvt();
@@ -1287,7 +1341,7 @@ void check_x2apic(void)
 {
 	if (x2apic_enabled()) {
 		pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
-		x2apic_preenabled = x2apic = 1;
+		x2apic_preenabled = x2apic_mode = 1;
 	}
 }
@@ -1295,7 +1349,7 @@ void enable_x2apic(void)
 {
 	int msr, msr2;

-	if (!x2apic)
+	if (!x2apic_mode)
 		return;

 	rdmsr(MSR_IA32_APICBASE, msr, msr2);
@@ -1304,6 +1358,7 @@ void enable_x2apic(void)
 		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
 	}
 }
+#endif /* CONFIG_X86_X2APIC */
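For reference (defined in apicdef.h, not shown in this diff): the X2APIC_ENABLE flag used by the wrmsr() above is bit 10 — the EXTD bit — of the IA32_APIC_BASE MSR, roughly:

	#define X2APIC_ENABLE	(1UL << 10)	/* IA32_APIC_BASE.EXTD */

Setting it switches the local APIC from xAPIC MMIO access to x2APIC MSR access.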

 void __init enable_IR_x2apic(void)
 {

@@ -1312,32 +1367,21 @@ void __init enable_IR_x2apic(void)
 	unsigned long flags;
 	struct IO_APIC_route_entry **ioapic_entries = NULL;

-	if (!cpu_has_x2apic)
-		return;
-
-	if (!x2apic_preenabled && disable_x2apic) {
-		pr_info("Skipped enabling x2apic and Interrupt-remapping "
-			"because of nox2apic\n");
-		return;
-	}
-
-	if (x2apic_preenabled && disable_x2apic)
-		panic("Bios already enabled x2apic, can't enforce nox2apic");
-
-	if (!x2apic_preenabled && skip_ioapic_setup) {
-		pr_info("Skipped enabling x2apic and Interrupt-remapping "
-			"because of skipping io-apic setup\n");
-		return;
-	}
-
 	ret = dmar_table_init();
 	if (ret) {
-		pr_info("dmar_table_init() failed with %d:\n", ret);
+		pr_debug("dmar_table_init() failed with %d:\n", ret);
+		goto ir_failed;
 	}

-	if (x2apic_preenabled)
-		panic("x2apic enabled by bios. But IR enabling failed");
-	else
-		pr_info("Not enabling x2apic,Intr-remapping\n");
+	if (!intr_remapping_supported()) {
+		pr_debug("intr-remapping not supported\n");
+		goto ir_failed;
+	}

+	if (!x2apic_preenabled && skip_ioapic_setup) {
+		pr_info("Skipped enabling intr-remap because of skipping "
+			"io-apic setup\n");
+		return;
+	}
@@ -1357,19 +1401,16 @@ void __init enable_IR_x2apic(void)
 	mask_IO_APIC_setup(ioapic_entries);
 	mask_8259A();

-	ret = enable_intr_remapping(EIM_32BIT_APIC_ID);
-
-	if (ret && x2apic_preenabled) {
-		local_irq_restore(flags);
-		panic("x2apic enabled by bios. But IR enabling failed");
-	}
-
+	ret = enable_intr_remapping(x2apic_supported());
 	if (ret)
 		goto end_restore;

-	if (!x2apic) {
-		x2apic = 1;
+	pr_info("Enabled Interrupt-remapping\n");
+
+	if (x2apic_supported() && !x2apic_mode) {
+		x2apic_mode = 1;
 		enable_x2apic();
+		pr_info("Enabled x2apic\n");
 	}

 end_restore:
@@ -1378,37 +1419,34 @@ end_restore:
 		 * IR enabling failed
 		 */
 		restore_IO_APIC_setup(ioapic_entries);
-	else
-		reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);

 	unmask_8259A();
 	local_irq_restore(flags);

 end:
-	if (!ret) {
-		if (!x2apic_preenabled)
-			pr_info("Enabled x2apic and interrupt-remapping\n");
-		else
-			pr_info("Enabled Interrupt-remapping\n");
-	} else
-		pr_err("Failed to enable Interrupt-remapping and x2apic\n");
 	if (ioapic_entries)
 		free_ioapic_entries(ioapic_entries);

+	if (!ret)
+		return;
+
+ir_failed:
+	if (x2apic_preenabled)
+		panic("x2apic enabled by bios. But IR enabling failed");
+	else if (cpu_has_x2apic)
+		pr_info("Not enabling x2apic,Intr-remapping\n");
 #else
 	if (!cpu_has_x2apic)
 		return;

 	if (x2apic_preenabled)
 		panic("x2apic enabled prior OS handover,"
-		      " enable CONFIG_INTR_REMAP");
-
-	pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
-		" and x2apic\n");
+		      " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
 #endif

 	return;
 }
-#endif /* CONFIG_X86_X2APIC */

 #ifdef CONFIG_X86_64
 /*

@@ -1425,7 +1463,6 @@ static int __init detect_init_APIC(void)
 	}

 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-	boot_cpu_physical_apicid = 0;
 	return 0;
 }
 #else
@@ -1539,32 +1576,49 @@ void __init early_init_lapic_mapping(void)
  */
 void __init init_apic_mappings(void)
 {
-	if (x2apic) {
+	unsigned int new_apicid;
+
+	if (x2apic_mode) {
 		boot_cpu_physical_apicid = read_apic_id();
 		return;
 	}

-	/*
-	 * If no local APIC can be found then set up a fake all
-	 * zeroes page to simulate the local APIC and another
-	 * one for the IO-APIC.
-	 */
+	/* If no local APIC can be found return early */
 	if (!smp_found_config && detect_init_APIC()) {
-		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
-		apic_phys = __pa(apic_phys);
-	} else
+		/* lets NOP'ify apic operations */
+		pr_info("APIC: disable apic facility\n");
+		apic_disable();
+	} else {
 		apic_phys = mp_lapic_addr;

-	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-	apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
-				APIC_BASE, apic_phys);
+		/*
+		 * acpi lapic path already maps that address in
+		 * acpi_register_lapic_address()
+		 */
+		if (!acpi_lapic)
+			set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+
+		apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+					APIC_BASE, apic_phys);
+	}

 	/*
 	 * Fetch the APIC ID of the BSP in case we have a
 	 * default configuration (or the MP table is broken).
 	 */
-	if (boot_cpu_physical_apicid == -1U)
-		boot_cpu_physical_apicid = read_apic_id();
+	new_apicid = read_apic_id();
+	if (boot_cpu_physical_apicid != new_apicid) {
+		boot_cpu_physical_apicid = new_apicid;
+		/*
+		 * yeah -- we lie about apic_version
+		 * in case if apic was disabled via boot option
+		 * but it's not a problem for SMP compiled kernel
+		 * since smp_sanity_check is prepared for such a case
+		 * and disable smp mode
+		 */
+		apic_version[new_apicid] =
+			 GET_APIC_VERSION(apic_read(APIC_LVR));
+	}
 }

 /*
@@ -1733,8 +1787,7 @@ void __init connect_bsp_APIC(void)
 		 */
 		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
 				"enabling APIC mode.\n");
-		outb(0x70, 0x22);
-		outb(0x01, 0x23);
+		imcr_pic_to_apic();
 	}
 #endif
 	if (apic->enable_apic_mode)
@@ -1762,8 +1815,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 		 */
 		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
 				"entering PIC mode.\n");
-		outb(0x70, 0x22);
-		outb(0x00, 0x23);
+		imcr_apic_to_pic();
 		return;
 	}
 #endif
@@ -1969,10 +2021,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)

 	local_irq_save(flags);
 	disable_local_APIC();
-#ifdef CONFIG_INTR_REMAP
+
 	if (intr_remapping_enabled)
 		disable_intr_remapping();
-#endif
+
 	local_irq_restore(flags);
 	return 0;
 }
@@ -1982,42 +2034,34 @@ static int lapic_resume(struct sys_device *dev)
 	unsigned int l, h;
 	unsigned long flags;
 	int maxlvt;

-#ifdef CONFIG_INTR_REMAP
-	int ret;
+	int ret = 0;
 	struct IO_APIC_route_entry **ioapic_entries = NULL;

 	if (!apic_pm_state.active)
 		return 0;

 	local_irq_save(flags);
-	if (x2apic) {
+	if (intr_remapping_enabled) {
 		ioapic_entries = alloc_ioapic_entries();
 		if (!ioapic_entries) {
 			WARN(1, "Alloc ioapic_entries in lapic resume failed.");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto restore;
 		}

 		ret = save_IO_APIC_setup(ioapic_entries);
 		if (ret) {
 			WARN(1, "Saving IO-APIC state failed: %d\n", ret);
 			free_ioapic_entries(ioapic_entries);
-			return ret;
+			goto restore;
 		}

 		mask_IO_APIC_setup(ioapic_entries);
 		mask_8259A();
-		enable_x2apic();
 	}
-#else
-	if (!apic_pm_state.active)
-		return 0;
-
-	local_irq_save(flags);
-	if (x2apic)
+
+	if (x2apic_mode)
 		enable_x2apic();
-#endif
 	else {
 		/*
 		 * Make sure the APICBASE points to the right address
@@ -2055,21 +2099,16 @@ static int lapic_resume(struct sys_device *dev)
 	apic_write(APIC_ESR, 0);
 	apic_read(APIC_ESR);

-#ifdef CONFIG_INTR_REMAP
-	if (intr_remapping_enabled)
-		reenable_intr_remapping(EIM_32BIT_APIC_ID);
-
-	if (x2apic) {
+	if (intr_remapping_enabled) {
+		reenable_intr_remapping(x2apic_mode);
 		unmask_8259A();
 		restore_IO_APIC_setup(ioapic_entries);
 		free_ioapic_entries(ioapic_entries);
 	}
-#endif

+restore:
 	local_irq_restore(flags);

-	return 0;
+	return ret;
 }

 /*
@@ -2117,31 +2156,14 @@ static void apic_pm_activate(void) { }
 #endif	/* CONFIG_PM */

 #ifdef CONFIG_X86_64
-/*
- * apic_is_clustered_box() -- Check if we can expect good TSC
- *
- * Thus far, the major user of this is IBM's Summit2 series:
- *
- * Clustered boxes may have unsynced TSC problems if they are
- * multi-chassis. Use available data to take a good guess.
- * If in doubt, go HPET.
- */
-__cpuinit int apic_is_clustered_box(void)
+
+static int __cpuinit apic_cluster_num(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
 	u16 *bios_cpu_apicid;
 	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

-	/*
-	 * there is not this kind of box with AMD CPU yet.
-	 * Some AMD box with quadcore cpu and 8 sockets apicid
-	 * will be [4, 0x23] or [8, 0x27] could be thought to
-	 * vsmp box still need checking...
-	 */
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
-		return 0;
-
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

@@ -2177,18 +2199,67 @@ __cpuinit int apic_is_clustered_box(void)
 			++zeros;
 	}

-	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
-	 * not guaranteed to be synced between boards
-	 */
-	if (is_vsmp_box() && clusters > 1)
+	return clusters;
+}
+
+static int __cpuinitdata multi_checked;
+static int __cpuinitdata multi;
+
+static int __cpuinit set_multi(const struct dmi_system_id *d)
+{
+	if (multi)
+		return 0;
+	pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
+	multi = 1;
+	return 0;
+}
+
+static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+	{
+		.callback = set_multi,
+		.ident = "IBM System Summit2",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
+		},
+	},
+	{}
+};
+
+static void __cpuinit dmi_check_multi(void)
+{
+	if (multi_checked)
+		return;
+
+	dmi_check_system(multi_dmi_table);
+	multi_checked = 1;
+}
+
+/*
+ * apic_is_clustered_box() -- Check if we can expect good TSC
+ *
+ * Thus far, the major user of this is IBM's Summit2 series:
+ * Clustered boxes may have unsynced TSC problems if they are
+ * multi-chassis.
+ * Use DMI to check them
+ */
+__cpuinit int apic_is_clustered_box(void)
+{
+	dmi_check_multi();
+	if (multi)
 		return 1;

+	if (!is_vsmp_box())
+		return 0;
+
 	/*
-	 * If clusters > 2, then should be multi-chassis.
-	 * May have to revisit this when multi-core + hyperthreaded CPUs come
-	 * out, but AFAIK this will work even for them.
+	 * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+	 * not guaranteed to be synced between boards
 	 */
-	return (clusters > 2);
+	if (apic_cluster_num() > 1)
+		return 1;
+
+	return 0;
 }
 #endif
@@ -161,7 +161,7 @@ static int flat_apic_id_registered(void)

 static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
 {
-	return hard_smp_processor_id() >> index_msb;
+	return initial_apic_id >> index_msb;
 }

 struct apic apic_flat = {

@@ -235,7 +235,7 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	 * regardless of how many processors are present (x86_64 ES7000
 	 * is an example).
 	 */
-	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
 	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
 		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
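The flat_phys_pkg_id() fix above matters because the function can be asked about a CPU other than the one executing it: e.g. an initial APIC ID of 0x1b with index_msb == 2 yields package 6 (0x1b >> 2) regardless of which CPU runs the lookup, whereas hard_smp_processor_id() always reported the caller's own APIC ID.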
@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi)
 	return gsi;
 }

-static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {
 	unsigned long vect = 0, psaival = 0;
[One file's diff is omitted here: the viewer suppressed it as too large to display.]
@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = {
 void __init default_setup_apic_routing(void)
 {
 #ifdef CONFIG_X86_X2APIC
-	if (x2apic && (apic != &apic_x2apic_phys &&
+	if (x2apic_mode && (apic != &apic_x2apic_phys &&
 #ifdef CONFIG_X86_UV
 		       apic != &apic_x2apic_uv_x &&
 #endif
@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){
 		rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
 }

-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT	4
-#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
 #define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)

 static const struct cpumask *summit_target_cpus(void)
@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	cpumask_set_cpu(cpu, retmask);
 }

-static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
 	unsigned long val;

@@ -583,15 +583,18 @@ void __init uv_system_init(void)

 	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
 	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
+	BUG_ON(!uv_blade_info);

 	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

 	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
 	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
+	BUG_ON(!uv_node_to_blade);
 	memset(uv_node_to_blade, 255, bytes);

 	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
 	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
+	BUG_ON(!uv_cpu_to_blade);
 	memset(uv_cpu_to_blade, 255, bytes);

 	blade = 0;
@@ -272,7 +272,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	int cpu = smp_processor_id();
 	int node;
-	unsigned apicid = hard_smp_processor_id();
+	unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

 	node = c->phys_proc_id;
 	if (apicid_to_node[apicid] != NUMA_NO_NODE)
@@ -299,7 +299,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL;		/* Not found */
 }

-__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

 void load_percpu_segment(int cpu)
 {

@@ -768,6 +769,12 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_identify)
 		this_cpu->c_identify(c);

+	/* Clear/Set all flags overriden by options, after probe */
+	for (i = 0; i < NCAPINTS; i++) {
+		c->x86_capability[i] &= ~cpu_caps_cleared[i];
+		c->x86_capability[i] |= cpu_caps_set[i];
+	}
+
 #ifdef CONFIG_X86_64
 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif

@@ -813,6 +820,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #endif

 	init_hypervisor(c);

+	/*
+	 * Clear/Set all flags overriden by options, need do it
+	 * before following smp all cpus cap AND.
+	 */
+	for (i = 0; i < NCAPINTS; i++) {
+		c->x86_capability[i] &= ~cpu_caps_cleared[i];
+		c->x86_capability[i] |= cpu_caps_set[i];
+	}
+
 	/*
 	 * On SMP, boot_cpu_data holds the common feature set between
 	 * all CPUs; so make sure that we indicate which features are

@@ -825,10 +842,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}

-	/* Clear all flags overriden by options */
-	for (i = 0; i < NCAPINTS; i++)
-		c->x86_capability[i] &= ~cleared_cpu_caps[i];
-
 #ifdef CONFIG_X86_MCE
 	/* Init Machine Check Exception if available. */
 	mcheck_init(c);
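The cpu_caps_cleared/cpu_caps_set split above is what the "clean up and fix setup_clear/force_cpu_cap handling" commit in this merge refers to. A hedged usage sketch (the helpers live in cpufeature.h, not shown in this diff):

	setup_clear_cpu_cap(X86_FEATURE_X2APIC);	/* records the bit in cpu_caps_cleared */
	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);	/* records the bit in cpu_caps_set */

identify_cpu() then re-applies both masks on every CPU — once right after probing and again before the boot_cpu_data feature AND — so command-line overrides survive per-CPU re-detection.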
@@ -588,8 +588,20 @@ static void print_apic(void *arg)
 	seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
 	seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
 	seq_printf(seq, " TDCR\t\t: %08x\n",   apic_read(APIC_TDCR));
-#endif /* CONFIG_X86_LOCAL_APIC */
+	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+		unsigned int i, v, maxeilvt;
+
+		v = apic_read(APIC_EFEAT);
+		maxeilvt = (v >> 16) & 0xff;
+		seq_printf(seq, " EFEAT\t\t: %08x\n", v);
+		seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
+
+		for (i = 0; i < maxeilvt; i++) {
+			v = apic_read(APIC_EILVTn(i));
+			seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
+		}
+	}
+#endif /* CONFIG_X86_LOCAL_APIC */
 	seq_printf(seq, "\n MSR\t:\n");
 }
@@ -229,12 +229,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 }
 #endif

-static void __cpuinit srat_detect_node(void)
+static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	unsigned node;
 	int cpu = smp_processor_id();
-	int apicid = hard_smp_processor_id();
+	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */

@@ -400,7 +400,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	}

 	/* Work around errata */
-	srat_detect_node();
+	srat_detect_node(c);

 	if (cpu_has(c, X86_FEATURE_VMX))
 		detect_vmx_virtcap(c);
@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL;
  */
 void ack_bad_irq(unsigned int irq)
 {
-	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+	if (printk_ratelimit())
+		pr_err("unexpected IRQ trap at vector %02x\n", irq);

-#ifdef CONFIG_X86_LOCAL_APIC
 	/*
 	 * Currently unexpected vectors happen only on SMP and APIC.
 	 * We _must_ ack these because every local APIC has only N

@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq)
 	 * completely.
 	 * But only ack when the APIC is enabled -AK
 	 */
-	if (cpu_has_apic)
-		ack_APIC_irq();
-#endif
+	ack_APIC_irq();
 }

 #define irq_stats(x)		(&per_cpu(irq_stat, x))

@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 	sum += irq_stats(cpu)->irq_thermal_count;
 # ifdef CONFIG_X86_64
 	sum += irq_stats(cpu)->irq_threshold_count;
-#endif
+# endif
 #endif
 	return sum;
 }

@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	irq = __get_cpu_var(vector_irq)[vector];

 	if (!handle_irq(irq, regs)) {
-#ifdef CONFIG_X86_64
-		if (!disable_apic)
-			ack_APIC_irq();
-#endif
+		ack_APIC_irq();

 		if (printk_ratelimit())
-			printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
-				__func__, smp_processor_id(), vector, irq);
+			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
+				__func__, smp_processor_id(), vector, irq);
 	}

 	irq_exit();
@@ -1,20 +1,25 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/timex.h>
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/kprobes.h>
 #include <linux/init.h>
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/delay.h>

 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
 #include <asm/desc.h>
 #include <asm/apic.h>

@@ -22,7 +27,23 @@
 #include <asm/i8259.h>
 #include <asm/traps.h>

 /*
  * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
  * (these are usually mapped to vectors 0x30-0x3f)
  */

+/*
+ * The IO-APIC gives us many more interrupt sources. Most of these
+ * are unused but an SMP system is supposed to have enough memory ...
+ * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+ * across the spectrum, so we really want to be prepared to get all
+ * of these. Plus, more powerful systems might have more than 64
+ * IO-APIC registers.
+ *
+ * (these are usually mapped into the 0x30-0xff vector range)
+ */
+
+#ifdef CONFIG_X86_32
 /*
  * Note that on a 486, we don't want to do a SIGFPE on an irq13
  * as the irq is unreliable, and exception 16 works correctly

@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = {
 	.handler = math_error_irq,
 	.name = "fpu",
 };

-void __init init_ISA_irqs(void)
-{
-	int i;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	init_bsp_APIC();
-#endif
-	init_8259A(0);
-
-	/*
-	 * 16 old-style INTA-cycle interrupts:
-	 */
-	for (i = 0; i < NR_IRQS_LEGACY; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		desc->status = IRQ_DISABLED;
-		desc->action = NULL;
-		desc->depth = 1;
-
-		set_irq_chip_and_handler_name(i, &i8259A_chip,
-					      handle_level_irq, "XT");
-	}
-}

 /*
  * IRQ2 is cascade interrupt to second interrupt controller

@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	return 0;
 }

-/* Overridden in paravirt.c */
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-void __init native_init_IRQ(void)
+static void __init init_ISA_irqs(void)
 {
 	int i;

-	/* Execute any quirks before the call gates are initialised: */
-	x86_quirk_pre_intr_init();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+	init_bsp_APIC();
+#endif
 	init_8259A(0);

 	/*
-	 * Cover the whole vector space, no vector can escape
-	 * us. (some of these will be overridden and become
-	 * 'special' SMP interrupts)
+	 * 16 old-style INTA-cycle interrupts:
 	 */
-	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
-		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (i != SYSCALL_VECTOR)
-			set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
+	for (i = 0; i < NR_IRQS_LEGACY; i++) {
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
+		set_irq_chip_and_handler_name(i, &i8259A_chip,
+					      handle_level_irq, "XT");
 	}
 }

+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
 static void __init smp_intr_init(void)
 {
+#ifdef CONFIG_SMP
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
 	/*
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.

@@ -160,16 +166,27 @@ void __init native_init_IRQ(void)
 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

-	/* IPI for single call function */
+	/* IPI for generic single function call */
 	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
-				 call_function_single_interrupt);
+			call_function_single_interrupt);

 	/* Low priority IPI to cleanup after moving an irq */
 	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
-#endif
+#endif /* CONFIG_SMP */
 }

+#ifdef CONFIG_X86_LOCAL_APIC
 static void __init apic_intr_init(void)
 {
 	smp_intr_init();

+#ifdef CONFIG_X86_64
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
+#endif

+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
 	/* self generated IPI for local APIC timer */
 	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

@@ -179,16 +196,67 @@ void __init native_init_IRQ(void)
 	/* IPI vectors for APIC spurious and error interrupts */
 	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
 	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

+	/* Performance monitoring interrupts: */
+# ifdef CONFIG_PERF_COUNTERS
+	alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
+	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
+# endif

 #endif

+#ifdef CONFIG_X86_32
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
 	/* thermal monitor LVT interrupt */
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
+#endif
 }

+/**
+ * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
+ *
+ * Description:
+ *	Perform any necessary interrupt initialisation prior to setting up
+ *	the "ordinary" interrupt call gates. For legacy reasons, the ISA
+ *	interrupts should be initialised here if the machine emulates a PC
+ *	in any way.
+ **/
+static void __init x86_quirk_pre_intr_init(void)
+{
+#ifdef CONFIG_X86_32
+	if (x86_quirks->arch_pre_intr_init) {
+		if (x86_quirks->arch_pre_intr_init())
+			return;
+	}
+#endif
+	init_ISA_irqs();
+}
+
+void __init native_init_IRQ(void)
+{
+	int i;
+
+	/* Execute any quirks before the call gates are initialised: */
+	x86_quirk_pre_intr_init();
+
+	apic_intr_init();
+
+	/*
+	 * Cover the whole vector space, no vector can escape
+	 * us. (some of these will be overridden and become
+	 * 'special' SMP interrupts)
+	 */
+	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
+		if (!test_bit(i, used_vectors))
+			set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
+	}
+
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
+#ifdef CONFIG_X86_32
 	/*
 	 * Call quirks after call gates are initialised (usually add in
 	 * the architecture specific gates):

@@ -203,4 +271,5 @@ void __init native_init_IRQ(void)
 	setup_irq(FPU_IRQ, &fpu_irq);

 	irq_ctx_init(smp_processor_id());
 #endif
 }
@@ -1,177 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/bitops.h>
-#include <linux/acpi.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/apic.h>
-#include <asm/i8259.h>
-
-/*
- * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x30-0x3f)
- */
-
-/*
- * The IO-APIC gives us many more interrupt sources. Most of these
- * are unused but an SMP system is supposed to have enough memory ...
- * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
- * across the spectrum, so we really want to be prepared to get all
- * of these. Plus, more powerful systems might have more than 64
- * IO-APIC registers.
- *
- * (these are usually mapped into the 0x30-0xff vector range)
- */
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.name = "cascade",
-};
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... IRQ0_VECTOR - 1] = -1,
-	[IRQ0_VECTOR] = 0,
-	[IRQ1_VECTOR] = 1,
-	[IRQ2_VECTOR] = 2,
-	[IRQ3_VECTOR] = 3,
-	[IRQ4_VECTOR] = 4,
-	[IRQ5_VECTOR] = 5,
-	[IRQ6_VECTOR] = 6,
-	[IRQ7_VECTOR] = 7,
-	[IRQ8_VECTOR] = 8,
-	[IRQ9_VECTOR] = 9,
-	[IRQ10_VECTOR] = 10,
-	[IRQ11_VECTOR] = 11,
-	[IRQ12_VECTOR] = 12,
-	[IRQ13_VECTOR] = 13,
-	[IRQ14_VECTOR] = 14,
-	[IRQ15_VECTOR] = 15,
-	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
-};
-
-int vector_used_by_percpu_irq(unsigned int vector)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (per_cpu(vector_irq, cpu)[vector] != -1)
-			return 1;
-	}
-
-	return 0;
-}
-
-static void __init init_ISA_irqs(void)
-{
-	int i;
-
-	init_bsp_APIC();
-	init_8259A(0);
-
-	for (i = 0; i < NR_IRQS_LEGACY; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		desc->status = IRQ_DISABLED;
-		desc->action = NULL;
-		desc->depth = 1;
-
-		/*
-		 * 16 old-style INTA-cycle interrupts:
-		 */
-		set_irq_chip_and_handler_name(i, &i8259A_chip,
-					      handle_level_irq, "XT");
-	}
-}
-
-void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
-
-static void __init smp_intr_init(void)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-	 * IPI, driven by wakeup.
-	 */
-	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
-	/* IPIs for invalidation */
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
-	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
-
-	/* IPI for generic function call */
-	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-
-	/* IPI for generic single function call */
-	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
-			call_function_single_interrupt);
-
-	/* Low priority IPI to cleanup after moving an irq */
-	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
-	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
-#endif
-}
-
-static void __init apic_intr_init(void)
-{
-	smp_intr_init();
-
-	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
-
-	/* self generated IPI for local APIC timer */
-	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
-
-	/* generic IPI for platform specific use */
-	alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
-
-	/* IPI vectors for APIC spurious and error interrupts */
-	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
-}
-
-void __init native_init_IRQ(void)
-{
-	int i;
-
-	init_ISA_irqs();
-	/*
-	 * Cover the whole vector space, no vector can escape
-	 * us. (some of these will be overridden and become
-	 * 'special' SMP interrupts)
-	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (vector != IA32_SYSCALL_VECTOR)
-			set_intr_gate(vector, interrupt[i]);
-	}
-
-	apic_intr_init();
-
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-}
@@ -17,6 +17,7 @@
 #include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/smp.h>
+#include <linux/pci.h>

 #include <asm/mtrr.h>
 #include <asm/mpspec.h>

@@ -870,24 +871,17 @@ static
 inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
 #endif /* CONFIG_X86_IO_APIC */

-static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
-		      int count)
+static int
+check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
 {
-	if (!mpc_new_phys) {
-		pr_info("No spare slots, try to append...take your risk, "
-			"new mpc_length %x\n", count);
-	} else {
-		if (count <= mpc_new_length)
-			pr_info("No spare slots, try to append..., "
-				"new mpc_length %x\n", count);
-		else {
-			pr_err("mpc_new_length %lx is too small\n",
-				mpc_new_length);
-			return -1;
-		}
+	int ret = 0;
+
+	if (!mpc_new_phys || count <= mpc_new_length) {
+		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
+		return -1;
 	}

-	return 0;
+	return ret;
 }

 static int __init replace_intsrc_all(struct mpc_table *mpc,

@@ -946,7 +940,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
 		} else {
 			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
 			count += sizeof(struct mpc_intsrc);
-			if (!check_slot(mpc_new_phys, mpc_new_length, count))
+			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
 				goto out;
 			assign_to_mpc_intsrc(&mp_irqs[i], m);
 			mpc->length = count;

@@ -963,11 +957,14 @@ out:
 	return 0;
 }

-static int __initdata enable_update_mptable;
+int enable_update_mptable;

 static int __init update_mptable_setup(char *str)
 {
 	enable_update_mptable = 1;
+#ifdef CONFIG_PCI
+	pci_routeirq = 1;
+#endif
 	return 0;
 }
 early_param("update_mptable", update_mptable_setup);

@@ -980,6 +977,9 @@ static int __initdata alloc_mptable;
 static int __init parse_alloc_mptable_opt(char *p)
 {
 	enable_update_mptable = 1;
+#ifdef CONFIG_PCI
+	pci_routeirq = 1;
+#endif
 	alloc_mptable = 1;
 	if (!p)
 		return 0;
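With the two option handlers above, either boot option now also turns on PCI IRQ routing through the legacy path. Assumed usage on the kernel command line:

	... update_mptable
	... alloc_mptable

(each one now implies pci_routeirq = 1 when CONFIG_PCI is set, which is what the "update_mptable needs pci_routeirq" commit in this merge is about).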
@@ -996,24 +996,6 @@ void __init setup_arch(char **cmdline_p)

 #ifdef CONFIG_X86_32

-/**
- * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
- *
- * Description:
- *	Perform any necessary interrupt initialisation prior to setting up
- *	the "ordinary" interrupt call gates. For legacy reasons, the ISA
- *	interrupts should be initialised here if the machine emulates a PC
- *	in any way.
- **/
-void __init x86_quirk_pre_intr_init(void)
-{
-	if (x86_quirks->arch_pre_intr_init) {
-		if (x86_quirks->arch_pre_intr_init())
-			return;
-	}
-	init_ISA_irqs();
-}
-
 /**
  * x86_quirk_intr_init - post gate setup interrupt initialisation
  *
@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 }

 struct smp_ops smp_ops = {
-	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
-	.smp_prepare_cpus = native_smp_prepare_cpus,
-	.smp_cpus_done = native_smp_cpus_done,
+	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
+	.smp_prepare_cpus	= native_smp_prepare_cpus,
+	.smp_cpus_done		= native_smp_cpus_done,

-	.smp_send_stop = native_smp_send_stop,
-	.smp_send_reschedule = native_smp_send_reschedule,
+	.smp_send_stop		= native_smp_send_stop,
+	.smp_send_reschedule	= native_smp_send_reschedule,

-	.cpu_up = native_cpu_up,
-	.cpu_die = native_cpu_die,
-	.cpu_disable = native_cpu_disable,
-	.play_dead = native_play_dead,
+	.cpu_up			= native_cpu_up,
+	.cpu_die		= native_cpu_die,
+	.cpu_disable		= native_cpu_disable,
+	.play_dead		= native_play_dead,

-	.send_call_func_ipi = native_send_call_func_ipi,
+	.send_call_func_ipi	= native_send_call_func_ipi,
 	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid)
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
-int __devinit
+int __cpuinit
 wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;

@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }

-int __devinit
+static int __cpuinit
 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;

@@ -822,10 +822,12 @@ do_rest:
 		/* mark "stuck" area as not stuck */
 		*((volatile unsigned long *)trampoline_base) = 0;

-	/*
-	 * Cleanup possible dangling ends...
-	 */
-	smpboot_restore_warm_reset_vector();
+	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+		/*
+		 * Cleanup possible dangling ends...
+		 */
+		smpboot_restore_warm_reset_vector();
+	}

 	return boot_error;
 }

@@ -990,10 +992,12 @@ static int __init smp_sanity_check(unsigned max_cpus)
 	 */
 	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
 	    !cpu_has_apic) {
-		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-			boot_cpu_physical_apicid);
-		printk(KERN_ERR "... forcing use of dummy APIC emulation."
+		if (!disable_apic) {
+			pr_err("BIOS bug, local APIC #%d not detected!...\n",
+				boot_cpu_physical_apicid);
+			pr_err("... forcing use of dummy APIC emulation."
 				"(tell your hw vendor)\n");
+		}
 		smpboot_clear_io_apic();
 		arch_disable_smp_support();
 		return -1;
@@ -969,11 +969,8 @@ void __init trap_init(void)
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 		set_bit(i, used_vectors);

-#ifdef CONFIG_X86_64
 	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
-#else
-	set_bit(SYSCALL_VECTOR, used_vectors);
-#endif

 	/*
 	 * Should be a barrier for any external CPU state:
 	 */