Merge branch 'irq/for-x86' into irq/core
Get the infrastructure patches which are required for x86/apic into core
@@ -59,8 +59,6 @@ enum {
 #include "debug.h"
 #include "settings.h"

-#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
-
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
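The two removed lines appear to drop the container_of() based irq_data_to_desc() macro from this header (apparently kernel/irq/internals.h, judging by the identifiers). In later kernels the helper lives in include/linux/irqdesc.h and resolves the descriptor through the shared common data instead, which also works for stacked irq_data in an irqdomain hierarchy. A minimal sketch of that form, offered as context rather than as part of this diff:

/*
 * Sketch (assumption, not shown in this diff): the descriptor is
 * recovered via the shared irq_common_data embedded in it, so the
 * lookup gives the same answer for every irq_data in a hierarchy.
 */
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data->common, struct irq_desc, irq_common_data);
}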
@@ -170,27 +168,27 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
  */
 static inline void irqd_set_move_pending(struct irq_data *d)
 {
-	d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
+	__irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;
 }

 static inline void irqd_clr_move_pending(struct irq_data *d)
 {
-	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
+	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
 }

 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
 {
-	d->state_use_accessors &= ~mask;
+	__irqd_to_state(d) &= ~mask;
 }

 static inline void irqd_set(struct irq_data *d, unsigned int mask)
 {
-	d->state_use_accessors |= mask;
+	__irqd_to_state(d) |= mask;
 }

 static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
 {
-	return d->state_use_accessors & mask;
+	return __irqd_to_state(d) & mask;
 }

 static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
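The irqd_* helpers stop touching d->state_use_accessors directly and go through __irqd_to_state() instead. The macro itself is not part of this hunk; a plausible sketch, assuming the state word has moved into the shared irq_common_data that the hunks below wire up:

/*
 * Sketch (assumption): the accessor state now lives in irq_common_data,
 * which every irq_data belonging to one descriptor points at, so all
 * stacked irq_data instances observe the same state bits.
 */
struct irq_common_data {
	unsigned int		state_use_accessors;
};

#define __irqd_to_state(d)	((d)->common->state_use_accessors)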
@@ -199,6 +197,11 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
 	__this_cpu_inc(kstat.irqs_sum);
 }

+static inline int irq_desc_get_node(struct irq_desc *desc)
+{
+	return irq_data_get_node(&desc->irq_data);
+}
+
 #ifdef CONFIG_PM_SLEEP
 bool irq_pm_check_wakeup(struct irq_desc *desc);
 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
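irq_desc_get_node() is added as a thin wrapper; the underlying irq_data_get_node() accessor is not visible in this diff. A plausible one-liner, assuming the NUMA node is still stored in struct irq_data at this stage (later kernels move it into irq_common_data):

/*
 * Sketch (assumption): hide where the node id is stored, so callers
 * such as free_desc() and setup_affinity() below no longer dereference
 * irq_data fields directly.
 */
static inline int irq_data_get_node(struct irq_data *d)
{
	return d->node;
}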
@@ -59,16 +59,10 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 #endif
 }

-static inline int desc_node(struct irq_desc *desc)
-{
-	return desc->irq_data.node;
-}
-
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
 static inline void desc_smp_init(struct irq_desc *desc, int node) { }
-static inline int desc_node(struct irq_desc *desc) { return 0; }
 #endif

 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -76,6 +70,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 {
 	int cpu;

+	desc->irq_data.common = &desc->irq_common_data;
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
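desc_set_defaults() (apparently in kernel/irq/irqdesc.c) now points the descriptor's own irq_data at an irq_common_data embedded in the same irq_desc. A rough sketch of how the two structures plausibly relate; field names other than ->common and the state word are illustrative assumptions:

/*
 * Sketch (assumption): the descriptor embeds the shared data and its
 * primary irq_data points back at it; irq_data allocated for parent
 * domain levels copy the same pointer (see the irqdomain hunk below).
 */
struct irq_data {
	unsigned int		irq;
	struct irq_common_data	*common;	/* shared across the stack */
	/* chip, chip_data, node, domain, parent_data, ... */
};

struct irq_desc {
	struct irq_common_data	irq_common_data;
	struct irq_data		irq_data;
	/* ... */
};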
@@ -299,7 +294,7 @@ static void free_desc(unsigned int irq)
 	unsigned long flags;

 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, desc_node(desc), NULL);
+	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

@@ -830,10 +830,12 @@ static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
 {
 	struct irq_data *irq_data;

-	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node);
+	irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
+				irq_data_get_node(child));
 	if (irq_data) {
 		child->parent_data = irq_data;
 		irq_data->irq = child->irq;
+		irq_data->common = child->common;
 		irq_data->node = child->node;
 		irq_data->domain = domain;
 	}
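Each irq_data allocated for a parent domain level now inherits the child's ->common pointer, so descriptor lookups and the irqd_* state helpers resolve to the same per-descriptor data no matter which level of the hierarchy a caller happens to hold. A hedged illustration (the helper name is made up, not kernel API):

/*
 * Illustration (hypothetical helper): with stacked domains, e.g.
 * MSI -> PCI -> vector, walking the parent chain never changes what
 * ->common points at.
 */
static bool irq_stack_shares_common(struct irq_data *d)
{
	struct irq_common_data *common = d->common;

	for (; d; d = d->parent_data)
		if (d->common != common)
			return false;	/* would indicate a wiring bug */
	return true;
}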
@@ -363,7 +363,7 @@ static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	struct cpumask *set = irq_default_affinity;
-	int node = desc->irq_data.node;
+	int node = irq_desc_get_node(desc);

 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -7,21 +7,21 @@
 void irq_move_masked_irq(struct irq_data *idata)
 {
 	struct irq_desc *desc = irq_data_to_desc(idata);
-	struct irq_chip *chip = idata->chip;
+	struct irq_chip *chip = desc->irq_data.chip;

 	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
 		return;

+	irqd_clr_move_pending(&desc->irq_data);
+
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (!irqd_can_balance(&desc->irq_data)) {
+	if (irqd_is_per_cpu(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}

-	irqd_clr_move_pending(&desc->irq_data);
-
 	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;

@@ -52,6 +52,13 @@ void irq_move_irq(struct irq_data *idata)
 {
 	bool masked;

+	/*
+	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
+	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
+	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
+	 */
+	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+
 	if (likely(!irqd_is_setaffinity_pending(idata)))
 		return;

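irq_move_irq() can now be handed an irq_data from any level of a domain hierarchy; the round trip through the descriptor normalises it to the top-level irq_data that actually carries the pending-move state. The helpers used here are defined elsewhere; a plausible one-line form, assuming the usual irqdesc.h accessor:

/*
 * Sketch (assumption): irq_desc_get_irq_data() hands back the
 * descriptor's own top-level irq_data, so
 *	irq_desc_get_irq_data(irq_data_to_desc(idata))
 * maps any stacked irq_data onto the one embedded in irq_desc.
 * Without CONFIG_IRQ_DOMAIN_HIERARCHY this is the identity mapping,
 * which is why the comment above expects it to be optimized away.
 */
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}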
@@ -241,7 +241,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long) m->private);

-	seq_printf(m, "%d\n", desc->irq_data.node);
+	seq_printf(m, "%d\n", irq_desc_get_node(desc));
 	return 0;
 }
