Merge branch 'for-ingo' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6 into x86/apic
Merge reason: the SFI (Simple Firmware Interface) feature in the ACPI tree needs this cleanup, pull it into the APIC branch as well so that there are no interactions.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -652,7 +652,8 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
return ret && es7000_apic_is_cluster();
}

struct apic apic_es7000_cluster = {
/* We've been warned by a false positive warning. Use __refdata to keep calm. */
struct apic __refdata apic_es7000_cluster = {

.name = "es7000",
.probe = probe_es7000,
@@ -87,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS];
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

@@ -3736,6 +3739,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

if (cfg->move_in_progress)
send_cleanup_vector(cfg);

return irq;
}

@@ -3885,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq,
return __io_apic_set_pci_routing(dev, irq, irq_attr);
}

/* --------------------------------------------------------------------------
ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */
u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
!APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
return io_apic_get_unique_id(nr_ioapics, id);
else
return id;
#else
int i;
DECLARE_BITMAP(used, 256);

#ifdef CONFIG_ACPI
bitmap_zero(used, 256);
for (i = 0; i < nr_ioapics; i++) {
struct mpc_ioapic *ia = &mp_ioapics[i];
__set_bit(ia->apicid, used);
}
if (!test_bit(id, used))
return id;
return find_first_zero_bit(used, 256);
#endif
}

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
@@ -3998,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
return 0;
}

#endif /* CONFIG_ACPI */

/*
* This function currently is only a helper for the i386 smp boot process where
* we need to reprogram the ioredtbls to cater for the cpus which have come online
@@ -4124,28 +4145,93 @@ fake_ioapic_page:
}
}

static int __init ioapic_insert_resources(void)
void __init ioapic_insert_resources(void)
{
int i;
struct resource *r = ioapic_resources;

if (!r) {
if (nr_ioapics > 0) {
if (nr_ioapics > 0)
printk(KERN_ERR
"IO APIC resources couldn't be allocated.\n");
return -1;
}
return 0;
return;
}

for (i = 0; i < nr_ioapics; i++) {
insert_resource(&iomem_resource, r);
r++;
}
}

int mp_find_ioapic(int gsi)
{
int i = 0;

/* Find the IOAPIC that manages this GSI. */
for (i = 0; i < nr_ioapics; i++) {
if ((gsi >= mp_gsi_routing[i].gsi_base)
&& (gsi <= mp_gsi_routing[i].gsi_end))
return i;
}

printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
return -1;
}

int mp_find_ioapic_pin(int ioapic, int gsi)
{
if (WARN_ON(ioapic == -1))
return -1;
if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
return -1;

return gsi - mp_gsi_routing[ioapic].gsi_base;
}

static int bad_ioapic(unsigned long address)
{
if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
"(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
return 1;
}
if (!address) {
printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
" found in table, skipping!\n");
return 1;
}
return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
* IO APICS that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
int idx = 0;

if (bad_ioapic(address))
return;

idx = nr_ioapics;

mp_ioapics[idx].type = MP_IOAPIC;
mp_ioapics[idx].flags = MPC_APIC_USABLE;
mp_ioapics[idx].apicaddr = address;

set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
mp_ioapics[idx].apicid = io_apic_unique_id(id);
mp_ioapics[idx].apicver = io_apic_get_version(idx);

/*
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
mp_gsi_routing[idx].gsi_base = gsi_base;
mp_gsi_routing[idx].gsi_end = gsi_base +
io_apic_get_redir_entries(idx);

printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

nr_ioapics++;
}
@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags;

if (WARN_ONCE(!mask, "empty IPI mask"))
return;

local_irq_save(flags);
WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
@@ -493,7 +493,8 @@ static void numaq_setup_portio_remap(void)
(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
}

struct apic apic_numaq = {
/* Use __refdata to keep false positive warning calm. */
struct apic __refdata apic_numaq = {

.name = "NUMAQ",
.probe = probe_numaq,
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return x2apic_enabled();
}

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

/*
* need to use more than cpu 0, because we need more vectors when
* MSI-X are used.
*/
static const struct cpumask *x2apic_target_cpus(void)
{
return cpumask_of(0);
return cpu_online_mask;
}

/*
@@ -170,7 +172,7 @@ static unsigned long set_apic_id(unsigned int id)

static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

/*
* need to use more than cpu 0, because we need more vectors when
* MSI-X are used.
*/
static const struct cpumask *x2apic_target_cpus(void)
{
return cpumask_of(0);
return cpu_online_mask;
}

static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
@@ -162,7 +164,7 @@ static unsigned long set_apic_id(unsigned int id)

static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
@@ -46,7 +46,7 @@ static int early_get_nodeid(void)
return node_id.s.node_id;
}

static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strcmp(oem_id, "SGI")) {
if (!strcmp(oem_table_id, "UVL"))
@@ -253,7 +253,7 @@ static void uv_send_IPI_self(int vector)
apic_write(APIC_SELF_IPI, vector);
}

struct apic apic_x2apic_uv_x = {
struct apic __refdata apic_x2apic_uv_x = {

.name = "UV large system",
.probe = NULL,
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = {
.apic_id_registered = uv_apic_id_registered,

.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 1, /* logical */
.irq_dest_mode = 0, /* physical */

.target_cpus = uv_target_cpus,
.disable_esr = 0,
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
BUG();
}

static __init void map_low_mmrs(void)
{
init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode)
map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}

static __init void map_config_high(int max_pnode)
{
union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
if (cfg.s.enable)
map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
if (mmr.s.enable)
map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -566,8 +540,6 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask;

map_low_mmrs();

m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
@@ -591,6 +563,8 @@ void __init uv_system_init(void)
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
uv_blade_info = kmalloc(bytes, GFP_KERNEL);
BUG_ON(!uv_blade_info);
for (blade = 0; blade < uv_num_possible_blades(); blade++)
uv_blade_info[blade].memory_nid = -1;

get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

@@ -629,6 +603,9 @@ void __init uv_system_init(void)
lcpu = uv_blade_info[blade].nr_possible_cpus;
uv_blade_info[blade].nr_possible_cpus++;

/* Any node on the blade, else will contain -1. */
uv_blade_info[blade].memory_nid = nid;

uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
uv_cpu_hub_info(cpu)->m_val = m_val;
@@ -662,11 +639,10 @@ void __init uv_system_init(void)
pnode = (paddr >> m_val) & pnode_mask;
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
max_pnode = max(pnode, max_pnode);
}

map_gru_high(max_pnode);
map_mmr_high(max_pnode);
map_config_high(max_pnode);
map_mmioh_high(max_pnode);

uv_cpu_init();