Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
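The diff below is the arch/x86/kernel/io_apic.c portion of this merge. Its common thread is the cpus4096 conversion: fixed-size on-stack cpumask_t values become heap-backed cpumask_var_t, and helpers take const struct cpumask * so masks are passed by reference instead of by value, keeping stack frames bounded when NR_CPUS is 4096. As a reading aid, here is a minimal userspace model of that allocation pattern; it is a sketch only, with MY_NR_CPUS and these bitmap helpers as illustrative stand-ins rather than the real <linux/cpumask.h> implementation.

/*
 * Minimal userspace model of the kernel's cpumask_var_t pattern.
 * A sketch: MY_NR_CPUS and these helpers are stand-ins, not the
 * real <linux/cpumask.h> API.
 */
#include <stdio.h>
#include <stdlib.h>

#define MY_NR_CPUS 4096         /* a large NR_CPUS is what motivates off-stack masks */
#define MASK_LONGS (MY_NR_CPUS / (8 * sizeof(long)))

struct cpumask { unsigned long bits[MASK_LONGS]; };
typedef struct cpumask *cpumask_var_t; /* off-stack flavour: a pointer to heap storage */

static int alloc_cpumask_var(cpumask_var_t *mask)
{
        *mask = calloc(1, sizeof(struct cpumask));
        return *mask != NULL;           /* callers must check, as the diff does */
}

static void free_cpumask_var(cpumask_var_t mask)
{
        free(mask);
}

static void cpumask_set_cpu(unsigned int cpu, struct cpumask *m)
{
        m->bits[cpu / (8 * sizeof(long))] |= 1UL << (cpu % (8 * sizeof(long)));
}

/* Callees take "const struct cpumask *", so they never copy a whole mask. */
static int cpumask_test_cpu(unsigned int cpu, const struct cpumask *m)
{
        return (m->bits[cpu / (8 * sizeof(long))] >> (cpu % (8 * sizeof(long)))) & 1;
}

int main(void)
{
        cpumask_var_t domain;

        if (!alloc_cpumask_var(&domain))        /* mirrors the error handling added below */
                return 1;
        cpumask_set_cpu(3, domain);
        printf("cpu 3 set: %d\n", cpumask_test_cpu(3, domain));
        free_cpumask_var(domain);
        return 0;
}

The design point mirrored here is that owners allocate and free mask storage explicitly (as the diff does for cfg->domain and cfg->old_domain), while every callee accepts const struct cpumask * and so works unchanged wherever the mask storage lives.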
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
 
 struct irq_cfg {
         struct irq_pin_list *irq_2_pin;
-        cpumask_t domain;
-        cpumask_t old_domain;
+        cpumask_var_t domain;
+        cpumask_var_t old_domain;
         unsigned move_cleanup_count;
         u8 vector;
         u8 move_in_progress : 1;
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = {
 #else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
 #endif
-        [0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-        [1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-        [2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-        [3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-        [4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-        [5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-        [6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-        [7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-        [8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-        [9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-        [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-        [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-        [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-        [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-        [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-        [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+        [0]  = { .vector = IRQ0_VECTOR,  },
+        [1]  = { .vector = IRQ1_VECTOR,  },
+        [2]  = { .vector = IRQ2_VECTOR,  },
+        [3]  = { .vector = IRQ3_VECTOR,  },
+        [4]  = { .vector = IRQ4_VECTOR,  },
+        [5]  = { .vector = IRQ5_VECTOR,  },
+        [6]  = { .vector = IRQ6_VECTOR,  },
+        [7]  = { .vector = IRQ7_VECTOR,  },
+        [8]  = { .vector = IRQ8_VECTOR,  },
+        [9]  = { .vector = IRQ9_VECTOR,  },
+        [10] = { .vector = IRQ10_VECTOR, },
+        [11] = { .vector = IRQ11_VECTOR, },
+        [12] = { .vector = IRQ12_VECTOR, },
+        [13] = { .vector = IRQ13_VECTOR, },
+        [14] = { .vector = IRQ14_VECTOR, },
+        [15] = { .vector = IRQ15_VECTOR, },
 };
 
 int __init arch_early_irq_init(void)
@@ -183,6 +183,10 @@ int __init arch_early_irq_init(void)
         for (i = 0; i < count; i++) {
                 desc = irq_to_desc(i);
                 desc->chip_data = &cfg[i];
+                alloc_bootmem_cpumask_var(&cfg[i].domain);
+                alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+                if (i < NR_IRQS_LEGACY)
+                        cpumask_setall(cfg[i].domain);
         }
 
         return 0;
@@ -209,6 +213,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
         node = cpu_to_node(cpu);
 
         cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+        if (cfg) {
+                /* FIXME: needs alloc_cpumask_var_node() */
+                if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
+                        kfree(cfg);
+                        cfg = NULL;
+                } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
+                        free_cpumask_var(cfg->domain);
+                        kfree(cfg);
+                        cfg = NULL;
+                } else {
+                        cpumask_clear(cfg->domain);
+                        cpumask_clear(cfg->old_domain);
+                }
+        }
         printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
         return cfg;
@@ -333,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
         }
 }
 
-static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg = desc->chip_data;
 
         if (!cfg->move_in_progress) {
                 /* it means that domain is not changed */
-                if (!cpus_intersects(desc->affinity, mask))
+                if (!cpumask_intersects(&desc->affinity, mask))
                         cfg->move_desc_pending = 1;
         }
 }
@@ -354,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 #endif
 
 #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 }
 #endif
@@ -485,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+        cpumask_var_t cleanup_mask;
+
+        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+                unsigned int i;
+                cfg->move_cleanup_count = 0;
+                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                        cfg->move_cleanup_count++;
+                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                        send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+        } else {
+                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+                cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+                free_cpumask_var(cleanup_mask);
+        }
+        cfg->move_in_progress = 0;
+}
+
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
         int apic, pin;
@@ -520,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
         }
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg;
-        unsigned long flags;
-        unsigned int dest;
-        cpumask_t tmp;
         unsigned int irq;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
-                return;
+        if (!cpumask_intersects(mask, cpu_online_mask))
+                return BAD_APICID;
 
         irq = desc->irq;
         cfg = desc->chip_data;
         if (assign_irq_vector(irq, cfg, mask))
-                return;
+                return BAD_APICID;
 
+        cpumask_and(&desc->affinity, cfg->domain, mask);
         set_extra_move_desc(desc, mask);
+
+        return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
 
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
-        /*
-         * Only the high 8 bits are valid.
-         */
-        dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+        struct irq_cfg *cfg;
+        unsigned long flags;
+        unsigned int dest;
+        unsigned int irq;
+
+        irq = desc->irq;
+        cfg = desc->chip_data;
 
         spin_lock_irqsave(&ioapic_lock, flags);
-        __target_IO_APIC_irq(irq, dest, cfg);
-        desc->affinity = mask;
+        dest = set_desc_affinity(desc, mask);
+        if (dest != BAD_APICID) {
+                /* Only the high 8 bits are valid. */
+                dest = SET_APIC_LOGICAL_ID(dest);
+                __target_IO_APIC_irq(irq, dest, cfg);
+        }
         spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc;
@@ -1222,7 +1276,8 @@ void unlock_vector_lock(void)
         spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
         /*
          * NOTE! The local APIC isn't very good at handling
@@ -1237,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
          */
         static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
         unsigned int old_vector;
-        int cpu;
+        int cpu, err;
+        cpumask_var_t tmp_mask;
 
         if ((cfg->move_in_progress) || cfg->move_cleanup_count)
                 return -EBUSY;
 
-        /* Only try and allocate irqs on cpus that are present */
-        cpus_and(mask, mask, cpu_online_map);
+        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+                return -ENOMEM;
 
         old_vector = cfg->vector;
         if (old_vector) {
-                cpumask_t tmp;
-                cpus_and(tmp, cfg->domain, mask);
-                if (!cpus_empty(tmp))
+                cpumask_and(tmp_mask, mask, cpu_online_mask);
+                cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+                if (!cpumask_empty(tmp_mask)) {
+                        free_cpumask_var(tmp_mask);
                         return 0;
+                }
         }
 
-        for_each_cpu_mask_nr(cpu, mask) {
-                cpumask_t domain, new_mask;
+        /* Only try and allocate irqs on cpus that are present */
+        err = -ENOSPC;
+        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                 int new_cpu;
                 int vector, offset;
 
-                domain = vector_allocation_domain(cpu);
-                cpus_and(new_mask, domain, cpu_online_map);
+                vector_allocation_domain(cpu, tmp_mask);
 
                 vector = current_vector;
                 offset = current_offset;
 next:
                 vector += 8;
                 if (vector >= first_system_vector) {
-                        /* If we run out of vectors on large boxen, must share them. */
+                        /* If out of vectors on large boxen, must share them. */
                         offset = (offset + 1) % 8;
                         vector = FIRST_DEVICE_VECTOR + offset;
                 }
                 if (unlikely(current_vector == vector))
                         continue;
-#ifdef CONFIG_X86_64
-                if (vector == IA32_SYSCALL_VECTOR)
+
+                if (test_bit(vector, used_vectors))
                         goto next;
-#else
-                if (vector == SYSCALL_VECTOR)
-                        goto next;
-#endif
-                for_each_cpu_mask_nr(new_cpu, new_mask)
+
+                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                         if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                 goto next;
                 /* Found one! */
@@ -1287,18 +1342,21 @@ next:
                 current_offset = offset;
                 if (old_vector) {
                         cfg->move_in_progress = 1;
-                        cfg->old_domain = cfg->domain;
+                        cpumask_copy(cfg->old_domain, cfg->domain);
                 }
-                for_each_cpu_mask_nr(new_cpu, new_mask)
+                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                         per_cpu(vector_irq, new_cpu)[vector] = irq;
                 cfg->vector = vector;
-                cfg->domain = domain;
-                return 0;
+                cpumask_copy(cfg->domain, tmp_mask);
+                err = 0;
+                break;
         }
-        return -ENOSPC;
+        free_cpumask_var(tmp_mask);
+        return err;
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
         int err;
         unsigned long flags;
@@ -1311,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 
 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-        cpumask_t mask;
         int cpu, vector;
 
         BUG_ON(!cfg->vector);
 
         vector = cfg->vector;
-        cpus_and(mask, cfg->domain, cpu_online_map);
-        for_each_cpu_mask_nr(cpu, mask)
+        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                 per_cpu(vector_irq, cpu)[vector] = -1;
 
         cfg->vector = 0;
-        cpus_clear(cfg->domain);
+        cpumask_clear(cfg->domain);
 
         if (likely(!cfg->move_in_progress))
                 return;
-        cpus_and(mask, cfg->old_domain, cpu_online_map);
-        for_each_cpu_mask_nr(cpu, mask) {
+        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                                                                 vector++) {
                         if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1350,7 +1405,7 @@ void __setup_vector_irq(int cpu)
         /* Mark the inuse vectors */
         for_each_irq_desc(irq, desc) {
                 cfg = desc->chip_data;
-                if (!cpu_isset(cpu, cfg->domain))
+                if (!cpumask_test_cpu(cpu, cfg->domain))
                         continue;
                 vector = cfg->vector;
                 per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1362,7 +1417,7 @@ void __setup_vector_irq(int cpu)
                         continue;
 
                 cfg = irq_cfg(irq);
-                if (!cpu_isset(cpu, cfg->domain))
+                if (!cpumask_test_cpu(cpu, cfg->domain))
                         per_cpu(vector_irq, cpu)[vector] = -1;
         }
 }
@@ -1498,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 {
         struct irq_cfg *cfg;
         struct IO_APIC_route_entry entry;
-        cpumask_t mask;
+        unsigned int dest;
 
         if (!IO_APIC_IRQ(irq))
                 return;
 
         cfg = desc->chip_data;
 
-        mask = TARGET_CPUS;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, TARGET_CPUS))
                 return;
 
-        cpus_and(mask, cfg->domain, mask);
+        dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
         apic_printk(APIC_VERBOSE,KERN_DEBUG
                     "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1519,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
 
         if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-                               cpu_mask_to_apicid(mask), trigger, polarity,
-                               cfg->vector)) {
+                               dest, trigger, polarity, cfg->vector)) {
                 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
                        mp_ioapics[apic].mp_apicid, pin);
                 __clear_irq_vector(irq, cfg);
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
         unsigned long flags;
 
         spin_lock_irqsave(&vector_lock, flags);
-        send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+        send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
         spin_unlock_irqrestore(&vector_lock, flags);
 
         return 1;
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg;
-        cpumask_t tmp, cleanup_mask;
         struct irte irte;
         int modify_ioapic_rte;
         unsigned int dest;
         unsigned long flags;
         unsigned int irq;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         irq = desc->irq;
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 
         set_extra_move_desc(desc, mask);
 
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
+        dest = cpu_mask_to_apicid_and(cfg->domain, mask);
 
         modify_ioapic_rte = desc->status & IRQ_LEVEL;
         if (modify_ioapic_rte) {
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
          */
         modify_irte(irq, &irte);
 
-        if (cfg->move_in_progress) {
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
 
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
         }
 
         /* everthing is clear. we have right of way */
-        migrate_ioapic_irq_desc(desc, desc->pending_mask);
+        migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 
         ret = 0;
         desc->status &= ~IRQ_MOVE_PENDING;
-        cpus_clear(desc->pending_mask);
+        cpumask_clear(&desc->pending_mask);
 
 unmask:
         unmask_IO_APIC_irq_desc(desc);
@@ -2389,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work)
                         continue;
                 }
 
-                desc->chip->set_affinity(irq, desc->pending_mask);
+                desc->chip->set_affinity(irq, &desc->pending_mask);
                 spin_unlock_irqrestore(&desc->lock, flags);
         }
 }
@@ -2398,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+                                            const struct cpumask *mask)
 {
         if (desc->status & IRQ_LEVEL) {
                 desc->status |= IRQ_MOVE_PENDING;
-                desc->pending_mask = mask;
+                cpumask_copy(&desc->pending_mask, mask);
                 migrate_irq_remapped_level_desc(desc);
                 return;
         }
 
         migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+                                       const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
@@ -2444,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                 if (!cfg->move_cleanup_count)
                         goto unlock;
 
-                if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                         goto unlock;
 
                 __get_cpu_var(vector_irq)[vector] = -1;
@@ -2481,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp)
 
         vector = ~get_irq_regs()->orig_ax;
         me = smp_processor_id();
-        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-                cpumask_t cleanup_mask;
-
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
         *descp = desc = move_irq_desc(desc, me);
         /* get the new one */
         cfg = desc->chip_data;
 #endif
 
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
+        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+                send_cleanup_vector(cfg);
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3216,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
         struct irq_cfg *cfg;
         int err;
         unsigned dest;
-        cpumask_t tmp;
 
         cfg = irq_cfg(irq);
-        tmp = TARGET_CPUS;
-        err = assign_irq_vector(irq, cfg, tmp);
+        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
         if (err)
                 return err;
 
-        cpus_and(tmp, cfg->domain, tmp);
-        dest = cpu_mask_to_apicid(tmp);
+        dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
         if (irq_remapped(irq)) {
@@ -3279,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         read_msi_msg_desc(desc, &msg);
 
@@ -3308,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         write_msi_msg_desc(desc, &msg);
-        desc->affinity = mask;
 }
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        struct irq_cfg *cfg;
+        struct irq_cfg *cfg = desc->chip_data;
         unsigned int dest;
-        cpumask_t tmp, cleanup_mask;
         struct irte irte;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
-                return;
-
         if (get_irte(irq, &irte))
                 return;
 
-        cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
-
         irte.vector = cfg->vector;
         irte.dest_id = IRTE_DEST(dest);
 
@@ -3352,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
          * at the new destination. So, time to cleanup the previous
          * vector allocation.
          */
-        if (cfg->move_in_progress) {
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
-
-        desc->affinity = mask;
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
 }
 
 #endif
@@ -3550,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         dmar_msi_read(irq, &msg);
 
@@ -3579,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         dmar_msi_write(irq, &msg);
-        desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3613,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         hpet_msi_read(irq, &msg);
 
@@ -3642,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         hpet_msi_write(irq, &msg);
-        desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3697,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
         write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         target_ht_irq(irq, dest, cfg->vector);
-        desc->affinity = mask;
 }
 
 #endif
@@ -3738,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
         struct irq_cfg *cfg;
         int err;
-        cpumask_t tmp;
 
         cfg = irq_cfg(irq);
-        tmp = TARGET_CPUS;
-        err = assign_irq_vector(irq, cfg, tmp);
+        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
         if (!err) {
                 struct ht_irq_msg msg;
                 unsigned dest;
 
-                cpus_and(tmp, cfg->domain, tmp);
-                dest = cpu_mask_to_apicid(tmp);
+                dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
                 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3784,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                        unsigned long mmr_offset)
 {
-        const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+        const struct cpumask *eligible_cpu = cpumask_of(cpu);
         struct irq_cfg *cfg;
         int mmr_pnode;
         unsigned long mmr_value;
@@ -3794,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 
         cfg = irq_cfg(irq);
 
-        err = assign_irq_vector(irq, cfg, *eligible_cpu);
+        err = assign_irq_vector(irq, cfg, eligible_cpu);
         if (err != 0)
                 return err;
 
@@ -3813,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         entry->polarity = 0;
         entry->trigger = 0;
         entry->mask = 0;
-        entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+        entry->dest = cpu_mask_to_apicid(eligible_cpu);
 
         mmr_pnode = uv_blade_to_pnode(mmr_blade);
         uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -4024,7 +4010,7 @@ void __init setup_ioapic_dest(void)
         int pin, ioapic, irq, irq_entry;
         struct irq_desc *desc;
         struct irq_cfg *cfg;
-        cpumask_t mask;
+        const struct cpumask *mask;
 
         if (skip_ioapic_setup == 1)
                 return;
@@ -4055,7 +4041,7 @@ void __init setup_ioapic_dest(void)
                          */
                         if (desc->status &
                             (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                                mask = desc->affinity;
+                                mask = &desc->affinity;
                         else
                                 mask = TARGET_CPUS;
 
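A recurring shape in the MSI, DMAR, HPET and HT affinity setters above is that the duplicated "check the mask against online CPUs, reassign the vector, compute the APIC destination" sequence collapses into set_desc_affinity(), which returns BAD_APICID on failure. The sketch below models just that consolidation in plain C; reassign_vector, mask_to_dest and BAD_DEST are illustrative stand-ins, not kernel symbols.

#include <stdio.h>

#define BAD_DEST 0xFFFFu                /* sentinel, playing the role of BAD_APICID */

/* Stand-ins for assign_irq_vector() and cpu_mask_to_apicid_and(). */
static int reassign_vector(unsigned int mask)
{
        return mask == 0;               /* nonzero means failure */
}

static unsigned int mask_to_dest(unsigned int mask)
{
        return mask & 0xFFu;
}

/* One helper replaces the sequence each affinity setter used to duplicate. */
static unsigned int set_affinity_common(unsigned int online, unsigned int requested)
{
        unsigned int effective = online & requested;

        if (!effective)
                return BAD_DEST;        /* requested mask has no online CPUs */
        if (reassign_vector(effective))
                return BAD_DEST;        /* vector assignment failed */
        return mask_to_dest(effective);
}

int main(void)
{
        unsigned int dest = set_affinity_common(0x0F, 0x05);

        if (dest != BAD_DEST)           /* callers just test the sentinel, as in the diff */
                printf("destination 0x%x\n", dest);
        return 0;
}

Each caller then shrinks to one call plus a sentinel test, which is why those hunks delete roughly twice as many lines as they add.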