Merge branch 'cpus4096' into irq/threaded
Conflicts:
	arch/parisc/kernel/irq.c
	kernel/irq/handle.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
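
The `&desc->affinity` → `desc->affinity` pattern above (repeated throughout this merge) comes from the cpus4096 branch converting `irq_desc.affinity` from an embedded `cpumask_t` to a `cpumask_var_t`: with CONFIG_CPUMASK_OFFSTACK=y that type is a real pointer to a separately allocated mask, otherwise a one-element array, so either way it is passed without taking its address. A minimal sketch of the idiom, with `example_mask` as a hypothetical variable that is not part of this patch:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Hypothetical example showing the cpumask_var_t calling
     * convention used in the hunks above. */
    static cpumask_var_t example_mask;

    static int __init example_init(void)
    {
            /* Allocates only when CONFIG_CPUMASK_OFFSTACK=y;
             * otherwise this is a no-op that returns true. */
            if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_setall(example_mask);   /* note: no '&' */
            return 0;
    }

    static void example_exit(void)
    {
            free_cpumask_var(example_mask);
    }

This is also why the `CPU_MASK_ALL` static initializers disappear from the irq_desc templates below: a cpumask_var_t cannot be initialized at compile time when it may live off-stack, so the masks are set up at runtime by init_alloc_desc_masks().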
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
 	.status = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -115,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -123,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -133,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
 /* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -150,18 +149,30 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
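
Two runtime-sizing changes land in this hunk: `irq_desc_ptrs` becomes a bootmem allocation of `nr_irqs` pointers instead of a compile-time `NR_IRQS` array, and the legacy kstat counters collapse from a static `[NR_IRQS_LEGACY][NR_CPUS]` matrix into one flat block of `NR_IRQS_LEGACY * nr_cpu_ids` ints, with row i starting at offset `i * nr_cpu_ids`. A short sketch of that flattening, under hypothetical names:

    #include <linux/bootmem.h>

    /* Sketch only: one flat bootmem block replacing a static
     * rows-by-cols array. alloc_bootmem() panics rather than
     * returning NULL, so no error check is needed here. */
    static unsigned int *kstat_block;

    static void __init kstat_block_init(unsigned int rows, unsigned int cols)
    {
            kstat_block = alloc_bootmem(rows * cols * sizeof(unsigned int));
    }

    static unsigned int *kstat_row(unsigned int i, unsigned int cols)
    {
            /* mirrors: desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids */
            return kstat_block + i * cols;
    }

Sizing by `nr_cpu_ids` (highest possible CPU number plus one) rather than `NR_CPUS` is the point of the cpus4096 work: a kernel built with NR_CPUS=4096 no longer pays the full static cost on a small machine.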
@@ -169,7 +180,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -178,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-			irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -223,9 +236,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -238,14 +248,16 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
+		init_alloc_desc_masks(&desc[i], 0, true);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
 	}
 
 	return arch_early_irq_init();
 }
@@ -17,7 +17,14 @@ extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
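
The header now carries both shapes of the table: under CONFIG_SPARSE_IRQ a bare `struct irq_desc **` that only points somewhere after early_irq_init() has run its alloc_bootmem() call, and otherwise the old fixed-size array. That boot-time window is why the rewritten irq_to_desc() in handle.c above checks the pointer before indexing; roughly, the lookup contract is:

    /* Sketch of the SPARSE_IRQ lookup contract: callers running
     * before early_irq_init() see irq_desc_ptrs == NULL and get
     * NULL back instead of indexing an unallocated table.
     * lookup_desc() is an illustrative name, not the kernel's. */
    struct irq_desc *lookup_desc(unsigned int irq)
    {
            if (irq_desc_ptrs && irq < nr_irqs)
                    return irq_desc_ptrs[irq];

            return NULL;    /* too early, or irq out of range */
    }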
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
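
Both manage.c hunks keep the pre-existing two-path affinity logic and only drop the `&`: when the IRQ can be retargeted immediately (IRQ_MOVE_PCNTXT, or the line is disabled) the mask is copied into desc->affinity and pushed to the chip; otherwise it is parked in desc->pending_mask with IRQ_MOVE_PENDING set, and move_masked_irq() in the next file applies it once the IRQ is masked. A condensed sketch of that handshake, with hypothetical function names standing in for irq_set_affinity() and move_masked_irq():

    /* Condensed sketch of the deferred-affinity handshake; not the
     * actual kernel functions. */
    static void affinity_request(struct irq_desc *desc, unsigned int irq,
                                 const struct cpumask *new)
    {
            if (desc->status & (IRQ_MOVE_PCNTXT | IRQ_DISABLED)) {
                    cpumask_copy(desc->affinity, new);      /* safe now */
                    desc->chip->set_affinity(irq, new);
            } else {
                    desc->status |= IRQ_MOVE_PENDING;       /* defer */
                    cpumask_copy(desc->pending_mask, new);
            }
    }

    static void affinity_apply(struct irq_desc *desc, unsigned int irq)
    {
            desc->status &= ~IRQ_MOVE_PENDING;
            if (cpumask_empty(desc->pending_mask))
                    return;
            /* only move if at least one requested target is online */
            if (cpumask_any_and(desc->pending_mask, cpu_online_mask)
                < nr_cpu_ids) {
                    cpumask_and(desc->affinity,
                                desc->pending_mask, cpu_online_mask);
                    desc->chip->set_affinity(irq, desc->affinity);
            }
            cpumask_clear(desc->pending_mask);
    }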
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
@@ -33,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -71,12 +78,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
 	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+		/* still use old one */
+		kfree(desc);
+		desc = old_desc;
+		goto out_unlock;
+	}
 
 	irq_desc_ptrs[irq] = desc;
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
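
Turning init_copy_one_irq_desc() into a bool-returning function lets __real_move_irq_desc() roll back on failure: if the per-descriptor cpumasks cannot be allocated on the new node, the half-built copy is kfree()d and the old descriptor stays in service, instead of installing a descriptor whose affinity masks do not exist. The shape of that allocate-copy-or-rollback pattern, under hypothetical names:

    #include <linux/slab.h>

    struct widget { int id; /* ... payload ... */ };

    /* widget_init_copy() is a hypothetical stand-in for
     * init_copy_one_irq_desc(); it may fail on an internal allocation. */
    static bool widget_init_copy(const struct widget *old, struct widget *w);

    static struct widget *widget_move(struct widget *old, int node)
    {
            struct widget *w = kzalloc_node(sizeof(*w), GFP_ATOMIC, node);

            if (!w)
                    return old;             /* keep using the old one */

            if (!widget_init_copy(old, w)) {
                    kfree(w);               /* undo the partial copy */
                    return old;             /* keep using the old one */
            }
            return w;
    }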
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');