Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The irq department delivers:

   - new core infrastructure to allow better management of multi-queue
     devices (interrupt spreading, node aware descriptor allocation ...)

   - a new interrupt flow handler to support the new fangled Intel VMD
     devices.

   - yet another new interrupt controller driver.

   - a series of fixes which addresses sparse warnings, missing
     includes, missing static declarations etc from Ben Dooks.

   - a fix for the error handling in the hierarchical domain allocation
     code.

   - the usual pile of small updates to core and driver code"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  genirq: Fix missing irq allocation affinity hint
  irqdomain: Fix irq_domain_alloc_irqs_recursive() error handling
  irq/Documentation: Correct result of echnoing 5 to smp_affinity
  MAINTAINERS: Remove Jiang Liu from irq domains
  genirq/msi: Fix broken debug output
  genirq: Add a helper to spread an affinity mask for MSI/MSI-X vectors
  genirq/msi: Make use of affinity aware allocations
  genirq: Use affinity hint in irqdesc allocation
  genirq: Add affinity hint to irq allocation
  genirq: Introduce IRQD_AFFINITY_MANAGED flag
  genirq/msi: Remove unused MSI_FLAG_IDENTITY_MAP
  irqchip/s3c24xx: Fixup IO accessors for big endian
  irqchip/exynos-combiner: Fix usage of __raw IO
  irqdomain: Fix disposal of mappings for interrupt hierarchies
  irqchip/aspeed-vic: Add irq controller for Aspeed
  doc/devicetree: Add Aspeed VIC bindings
  x86/PCI/VMD: Use untracked irq handler
  genirq: Add untracked irq handler
  irqchip/mips-gic: Populate irq_domain names
  irqchip/gicv3-its: Implement two-level(indirect) device table support
  ...
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+obj-$(CONFIG_SMP) += affinity.o

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
new file mode 100644 (61 lines)
@@ -0,0 +1,61 @@
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+
+static int get_first_sibling(unsigned int cpu)
+{
+	unsigned int ret;
+
+	ret = cpumask_first(topology_sibling_cpumask(cpu));
+	if (ret < nr_cpu_ids)
+		return ret;
+	return cpu;
+}
+
+/*
+ * Take a map of online CPUs and the number of available interrupt vectors
+ * and generate an output cpumask suitable for spreading MSI/MSI-X vectors
+ * so that they are distributed as well as possible around the CPUs.  If
+ * more vectors than CPUs are available we'll map one to each CPU,
+ * otherwise we map one to the first sibling of each socket.
+ *
+ * If there are more vectors than CPUs we will still only have one bit
+ * set per CPU, but interrupt code will keep on assigning the vectors from
+ * the start of the bitmap until we run out of vectors.
+ */
+struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
+{
+	struct cpumask *affinity_mask;
+	unsigned int max_vecs = *nr_vecs;
+
+	if (max_vecs == 1)
+		return NULL;
+
+	affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL);
+	if (!affinity_mask) {
+		*nr_vecs = 1;
+		return NULL;
+	}
+
+	if (max_vecs >= num_online_cpus()) {
+		cpumask_copy(affinity_mask, cpu_online_mask);
+		*nr_vecs = num_online_cpus();
+	} else {
+		unsigned int vecs = 0, cpu;
+
+		for_each_online_cpu(cpu) {
+			if (cpu == get_first_sibling(cpu)) {
+				cpumask_set_cpu(cpu, affinity_mask);
+				vecs++;
+			}
+
+			if (--max_vecs == 0)
+				break;
+		}
+		*nr_vecs = vecs;
+	}
+
+	return affinity_mask;
+}
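
For context, a minimal sketch of how a consumer might use the new helper. This is not part of the diff; the function name example_spread_vectors is illustrative. Note that a NULL return simply means "no spreading": either a single vector was requested, or the allocation failed and *nr_vecs was reduced to 1.

/* Hypothetical consumer of irq_create_affinity_mask(). */
static int example_spread_vectors(unsigned int *nvec)
{
	struct cpumask *mask;

	mask = irq_create_affinity_mask(nvec);	/* may shrink *nvec */
	if (!mask)
		return 0;	/* fall back to a single, unspread vector */

	/* ... hand the mask down the MSI allocation path ... */

	kfree(mask);
	return 0;
}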
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
@@ -426,6 +426,49 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/**
+ *	handle_untracked_irq - Simple and software-decoded IRQs.
+ *	@desc:	the interrupt description structure for this irq
+ *
+ *	Untracked interrupts are sent from a demultiplexing interrupt
+ *	handler when the demultiplexer does not know which device in its
+ *	multiplexed irq domain generated the interrupt. IRQs handled
+ *	through here are not subjected to stats tracking, randomness, or
+ *	spurious interrupt detection.
+ *
+ *	Note: Like handle_simple_irq, the caller is expected to handle
+ *	the ack, clear, mask and unmask issues if necessary.
+ */
+void handle_untracked_irq(struct irq_desc *desc)
+{
+	unsigned int flags = 0;
+
+	raw_spin_lock(&desc->lock);
+
+	if (!irq_may_run(desc))
+		goto out_unlock;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		goto out_unlock;
+	}
+
+	desc->istate &= ~IRQS_PENDING;
+	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+	raw_spin_unlock(&desc->lock);
+
+	__handle_irq_event_percpu(desc, &flags);
+
+	raw_spin_lock(&desc->lock);
+	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+out_unlock:
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_untracked_irq);
+
 /*
  * Called unconditionally from handle_level_irq() and only for oneshot
  * interrupts from handle_fasteoi_irq()
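
The new flow handler is meant to be installed by demultiplexing irqchips that cannot attribute an interrupt to a specific child device, as the x86 VMD driver does elsewhere in this series. A hypothetical sketch, where demux_chip is a placeholder irq_chip:

/*
 * Sketch: install handle_untracked_irq as the flow handler for the
 * children of a demultiplexing domain, so they bypass stats,
 * randomness and spurious-interrupt accounting.
 */
static int demux_domain_map(struct irq_domain *d, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &demux_chip, handle_untracked_irq);
	return 0;
}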
@@ -1093,3 +1136,43 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 
 	return 0;
 }
+
+/**
+ * irq_chip_pm_get - Enable power for an IRQ chip
+ * @data:	Pointer to interrupt specific data
+ *
+ * Enable the power to the IRQ chip referenced by the interrupt data
+ * structure.
+ */
+int irq_chip_pm_get(struct irq_data *data)
+{
+	int retval;
+
+	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
+		retval = pm_runtime_get_sync(data->chip->parent_device);
+		if (retval < 0) {
+			pm_runtime_put_noidle(data->chip->parent_device);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * irq_chip_pm_put - Disable power for an IRQ chip
+ * @data:	Pointer to interrupt specific data
+ *
+ * Disable the power to the IRQ chip referenced by the interrupt data
+ * structure.  Note that power will only be disabled once this function
+ * has been called for all IRQs that have called irq_chip_pm_get().
+ */
+int irq_chip_pm_put(struct irq_data *data)
+{
+	int retval = 0;
+
+	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
+		retval = pm_runtime_put(data->chip->parent_device);
+
+	return (retval < 0) ? retval : 0;
+}
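
irq_chip_pm_get()/irq_chip_pm_put() are driven from the request/free and setup paths changed later in this series (see the manage.c hunks below); an irqchip driver only has to point chip->parent_device at a runtime-PM-enabled device. A hypothetical sketch with placeholder names:

/*
 * Sketch: wire an irqchip to runtime PM so that request_irq() powers
 * the controller up via irq_chip_pm_get() and the final free_irq()
 * powers it down again via irq_chip_pm_put().
 */
static struct irq_chip demo_chip = {
	.name = "demo",
	/* .irq_mask/.irq_unmask/... omitted */
};

static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	demo_chip.parent_device = &pdev->dev;	/* consumed by irq_chip_pm_get() */
	return 0;
}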
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
@@ -132,10 +132,10 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 	wake_up_process(action->thread);
 }
 
-irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int flags = 0, irq = desc->irq_data.irq;
+	unsigned int irq = desc->irq_data.irq;
 	struct irqaction *action;
 
 	for_each_action_of_desc(desc, action) {
@@ -164,7 +164,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
-			flags |= action->flags;
+			*flags |= action->flags;
 			break;
 
 		default:
@@ -174,7 +174,17 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 		retval |= res;
 	}
 
-	add_interrupt_randomness(irq, flags);
+	return retval;
+}
+
+irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+{
+	irqreturn_t retval;
+	unsigned int flags = 0;
+
+	retval = __handle_irq_event_percpu(desc, &flags);
+
+	add_interrupt_randomness(desc->irq_data.irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(desc, retval);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
@@ -7,6 +7,7 @@
  */
 #include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
+#include <linux/pm_runtime.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
@@ -83,6 +84,7 @@ extern void irq_mark_irq(unsigned int irq);
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
 irqreturn_t handle_irq_event(struct irq_desc *desc);
 
@@ -105,6 +107,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
@@ -76,14 +76,14 @@ int irq_reserve_ipi(struct irq_domain *domain,
 		}
 	}
 
-	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
+	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
 	if (virq <= 0) {
 		pr_warn("Can't reserve IPI, failed to alloc descs\n");
 		return -ENOMEM;
 	}
 
 	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
-				       (void *) dest, true);
+				       (void *) dest, true, NULL);
 
 	if (virq <= 0) {
 		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
@@ -68,9 +68,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 	return 0;
 }
 
-static void desc_smp_init(struct irq_desc *desc, int node)
+static void desc_smp_init(struct irq_desc *desc, int node,
+			  const struct cpumask *affinity)
 {
-	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
+	if (!affinity)
+		affinity = irq_default_affinity;
+	cpumask_copy(desc->irq_common_data.affinity, affinity);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_clear(desc->pending_mask);
 #endif
@@ -82,11 +86,12 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
-static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline void
+desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
-		struct module *owner)
+			      const struct cpumask *affinity, struct module *owner)
 {
 	int cpu;
 
@@ -107,7 +112,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 	desc->owner = owner;
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
-	desc_smp_init(desc, node);
+	desc_smp_init(desc, node, affinity);
 }
 
 int nr_irqs = NR_IRQS;
@@ -158,7 +163,9 @@ void irq_unlock_sparse(void)
 	mutex_unlock(&sparse_irq_lock);
 }
 
-static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
+				   const struct cpumask *affinity,
+				   struct module *owner)
 {
 	struct irq_desc *desc;
 	gfp_t gfp = GFP_KERNEL;
@@ -178,7 +185,8 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_rcu_head(&desc->rcu);
 
-	desc_set_defaults(irq, desc, node, owner);
+	desc_set_defaults(irq, desc, node, affinity, owner);
+	irqd_set(&desc->irq_data, flags);
 
 	return desc;
@@ -223,13 +231,32 @@ static void free_desc(unsigned int irq)
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-		       struct module *owner)
+		       const struct cpumask *affinity, struct module *owner)
 {
+	const struct cpumask *mask = NULL;
 	struct irq_desc *desc;
-	int i;
+	unsigned int flags;
+	int i, cpu = -1;
+
+	if (affinity && cpumask_empty(affinity))
+		return -EINVAL;
+
+	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
 
 	for (i = 0; i < cnt; i++) {
-		desc = alloc_desc(start + i, node, owner);
+		if (affinity) {
+			cpu = cpumask_next(cpu, affinity);
+			if (cpu >= nr_cpu_ids)
+				cpu = cpumask_first(affinity);
+			node = cpu_to_node(cpu);
+
+			/*
+			 * For single allocations we use the caller provided
+			 * mask, otherwise we use the mask of the target cpu.
+			 */
+			mask = cnt == 1 ? affinity : cpumask_of(cpu);
+		}
+		desc = alloc_desc(start + i, node, flags, mask, owner);
 		if (!desc)
 			goto err;
 		mutex_lock(&sparse_irq_lock);
@@ -277,7 +304,7 @@ int __init early_irq_init(void)
 		nr_irqs = initcnt;
 
 	for (i = 0; i < initcnt; i++) {
-		desc = alloc_desc(i, node, NULL);
+		desc = alloc_desc(i, node, 0, NULL, NULL);
 		set_bit(i, allocated_irqs);
 		irq_insert_desc(i, desc);
 	}
@@ -311,7 +338,7 @@ int __init early_irq_init(void)
 		alloc_masks(&desc[i], GFP_KERNEL, node);
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		desc_set_defaults(i, &desc[i], node, NULL);
+		desc_set_defaults(i, &desc[i], node, NULL, NULL);
 	}
 	return arch_early_irq_init();
 }
@@ -328,11 +355,12 @@ static void free_desc(unsigned int irq)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
+	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+			      const struct cpumask *affinity,
 			      struct module *owner)
 {
 	u32 i;
@@ -453,12 +481,15 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
+ * @affinity:	Optional pointer to an affinity mask which hints where the
+ *		irq descriptors should be allocated and which default
+ *		affinities to use
 *
 * Returns the first irq number or error code
 */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-		  struct module *owner)
+		  struct module *owner, const struct cpumask *affinity)
 {
 	int start, ret;
 
@@ -494,7 +525,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 
 	bitmap_set(allocated_irqs, start, cnt);
 	mutex_unlock(&sparse_irq_lock);
-	return alloc_descs(start, cnt, node, owner);
+	return alloc_descs(start, cnt, node, affinity, owner);
 
 err:
 	mutex_unlock(&sparse_irq_lock);
@@ -512,7 +543,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
 */
 unsigned int irq_alloc_hwirqs(int cnt, int node)
 {
-	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
+	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
 
 	if (irq < 0)
 		return 0;
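
Taken together, the irqdesc.c changes let a caller hand an affinity mask to the allocator and get node-local descriptors whose default affinity is already set. A hypothetical sketch against the new __irq_alloc_descs() signature (demo_alloc_spread is illustrative):

/*
 * Sketch: allocate two descriptors spread over CPUs 0 and 1. Each
 * descriptor is allocated on the node of its target CPU and marked
 * IRQD_AFFINITY_MANAGED by alloc_descs().
 */
static int demo_alloc_spread(void)
{
	cpumask_var_t mask;
	int irq;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	irq = __irq_alloc_descs(-1, 0, 2, NUMA_NO_NODE, THIS_MODULE, mask);

	free_cpumask_var(mask);
	return irq;
}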
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
@@ -481,7 +481,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
 	}
 
 	/* Allocate a virtual interrupt number */
-	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node));
+	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
 	if (virq <= 0) {
 		pr_debug("-> virq allocation failed\n");
 		return 0;
@@ -567,6 +567,7 @@ static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 {
 	struct irq_domain *domain;
+	struct irq_data *irq_data;
 	irq_hw_number_t hwirq;
 	unsigned int type = IRQ_TYPE_NONE;
 	int virq;
@@ -588,15 +589,46 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
 		return 0;
 
-	if (irq_domain_is_hierarchy(domain)) {
+	/*
+	 * WARN if the irqchip returns a type with bits
+	 * outside the sense mask set and clear these bits.
+	 */
+	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
+		type &= IRQ_TYPE_SENSE_MASK;
+
+	/*
+	 * If we've already configured this interrupt,
+	 * don't do it again, or hell will break loose.
+	 */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
 		/*
-		 * If we've already configured this interrupt,
-		 * don't do it again, or hell will break loose.
+		 * If the trigger type is not specified or matches the
+		 * current trigger type then we are done so return the
+		 * interrupt number.
 		 */
-		virq = irq_find_mapping(domain, hwirq);
-		if (virq)
+		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
 			return virq;
+
+		/*
+		 * If the trigger type has not been set yet, then set
+		 * it now and return the interrupt number.
+		 */
+		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
+			irq_data = irq_get_irq_data(virq);
+			if (!irq_data)
+				return 0;
+
+			irqd_set_trigger_type(irq_data, type);
+			return virq;
+		}
+
+		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
+			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
+		return 0;
+	}
+
+	if (irq_domain_is_hierarchy(domain)) {
 		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 		if (virq <= 0)
 			return 0;
@@ -607,10 +639,18 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 		return virq;
 	}
 
-	/* Set type if specified and different than the current one */
-	if (type != IRQ_TYPE_NONE &&
-	    type != irq_get_trigger_type(virq))
-		irq_set_irq_type(virq, type);
+	irq_data = irq_get_irq_data(virq);
+	if (!irq_data) {
+		if (irq_domain_is_hierarchy(domain))
+			irq_domain_free_irqs(virq, 1);
+		else
+			irq_dispose_mapping(virq);
+		return 0;
+	}
+
+	/* Store trigger type */
+	irqd_set_trigger_type(irq_data, type);
+
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
@@ -640,8 +680,12 @@ void irq_dispose_mapping(unsigned int virq)
 	if (WARN_ON(domain == NULL))
 		return;
 
-	irq_domain_disassociate(domain, virq);
-	irq_free_desc(virq);
+	if (irq_domain_is_hierarchy(domain)) {
+		irq_domain_free_irqs(virq, 1);
+	} else {
+		irq_domain_disassociate(domain, virq);
+		irq_free_desc(virq);
+	}
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
@@ -835,19 +879,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 
 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
-			   int node)
+			   int node, const struct cpumask *affinity)
 {
 	unsigned int hint;
 
 	if (virq >= 0) {
-		virq = irq_alloc_descs(virq, virq, cnt, node);
+		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
+					 affinity);
 	} else {
 		hint = hwirq % nr_irqs;
 		if (hint == 0)
 			hint++;
-		virq = irq_alloc_descs_from(hint, cnt, node);
-		if (virq <= 0 && hint > 1)
-			virq = irq_alloc_descs_from(1, cnt, node);
+		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
+					 affinity);
+		if (virq <= 0 && hint > 1) {
+			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
+						 affinity);
+		}
 	}
 
 	return virq;
@@ -1144,8 +1192,10 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
 	if (recursive)
 		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
 						      nr_irqs, arg);
-	if (ret >= 0)
-		ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
+	if (ret < 0)
+		return ret;
+
+	ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 	if (ret < 0 && recursive)
 		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
 
@@ -1160,6 +1210,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
 * @node:	NUMA node id for memory allocation
 * @arg:	domain specific argument
 * @realloc:	IRQ descriptors have already been allocated if true
+ * @affinity:	Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
@@ -1175,7 +1226,7 @@ int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
 */
 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 			    unsigned int nr_irqs, int node, void *arg,
-			    bool realloc)
+			    bool realloc, const struct cpumask *affinity)
 {
 	int i, ret, virq;
 
@@ -1193,7 +1244,8 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 	if (realloc && irq_base >= 0) {
 		virq = irq_base;
 	} else {
-		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node);
+		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
+					      affinity);
 		if (virq < 0) {
 			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
 				 irq_base, nr_irqs);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -133,6 +133,21 @@ int irq_can_set_affinity(unsigned int irq)
 	return __irq_can_set_affinity(irq_to_desc(irq));
 }
 
+/**
+ * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
 /**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
@@ -338,10 +353,11 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 		return 0;
 
 	/*
-	 * Preserve an userspace affinity setup, but make sure that
-	 * one of the targets is online.
+	 * Preserve the managed affinity setting and a userspace affinity
+	 * setup, but make sure that one of the targets is online.
 	 */
-	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+	if (irqd_affinity_is_managed(&desc->irq_data) ||
+	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 		if (cpumask_intersects(desc->irq_common_data.affinity,
 				       cpu_online_mask))
 			set = desc->irq_common_data.affinity;
@@ -1116,6 +1132,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	new->irq = irq;
 
+	/*
+	 * If the trigger type is not specified by the caller,
+	 * then use the default for this interrupt.
+	 */
+	if (!(new->flags & IRQF_TRIGGER_MASK))
+		new->flags |= irqd_get_trigger_type(&desc->irq_data);
+
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
@@ -1409,10 +1432,18 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
 
+	if (retval)
+		irq_chip_pm_put(&desc->irq_data);
+
 	return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
@@ -1506,6 +1537,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		}
 	}
 
+	irq_chip_pm_put(&desc->irq_data);
 	module_put(desc->owner);
 	kfree(action->secondary);
 	return action;
@@ -1648,11 +1680,16 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->dev_id = dev_id;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(desc);
 
 	if (retval) {
+		irq_chip_pm_put(&desc->irq_data);
 		kfree(action->secondary);
 		kfree(action);
 	}
@@ -1730,7 +1767,14 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 	if (!desc)
 		return;
 
+	/*
+	 * If the trigger type is not specified by the caller, then
+	 * use the default for this interrupt.
+	 */
 	type &= IRQ_TYPE_SENSE_MASK;
+	if (type == IRQ_TYPE_NONE)
+		type = irqd_get_trigger_type(&desc->irq_data);
+
 	if (type != IRQ_TYPE_NONE) {
 		int ret;
 
@@ -1822,6 +1866,7 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
 
 	unregister_handler_proc(irq, action);
 
+	irq_chip_pm_put(&desc->irq_data);
 	module_put(desc->owner);
 	return action;
 
@@ -1884,10 +1929,18 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
 
 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
 		return -EINVAL;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
 
+	if (retval)
+		irq_chip_pm_put(&desc->irq_data);
+
 	return retval;
 }
 
@@ -1931,12 +1984,18 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->percpu_dev_id = dev_id;
 
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		return retval;
+
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(desc);
 
-	if (retval)
+	if (retval) {
+		irq_chip_pm_put(&desc->irq_data);
 		kfree(action);
+	}
 
 	return retval;
 }
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
@@ -324,7 +324,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 	struct msi_domain_ops *ops = info->ops;
 	msi_alloc_info_t arg;
 	struct msi_desc *desc;
-	int i, ret, virq = -1;
+	int i, ret, virq;
 
 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
 	if (ret)
@@ -332,13 +332,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 
 	for_each_msi_entry(desc, dev) {
 		ops->set_desc(&arg, desc);
-		if (info->flags & MSI_FLAG_IDENTITY_MAP)
-			virq = (int)ops->get_hwirq(info, &arg);
-		else
-			virq = -1;
 
-		virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
-					       dev_to_node(dev), &arg, false);
+		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
+					       dev_to_node(dev), &arg, false,
+					       desc->affinity);
 		if (virq < 0) {
 			ret = -ENOSPC;
 			if (ops->handle_error)
@@ -356,6 +353,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 	ops->msi_finish(&arg, 0);
 
 	for_each_msi_entry(desc, dev) {
+		virq = desc->irq;
 		if (desc->nvec_used == 1)
 			dev_dbg(dev, "irq %d for MSI\n", virq);
 		else
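
With msi_domain_alloc_irqs() now forwarding desc->affinity, a bus core that wants spread vectors only has to attach the mask produced by irq_create_affinity_mask() to each descriptor beforehand. A hedged sketch (demo_prepare_msi is hypothetical; the surrounding MSI setup is assumed):

/*
 * Sketch: feed the spread mask into the MSI path. nvec may be shrunk
 * by the helper; each msi_desc then carries the mask that
 * msi_domain_alloc_irqs() hands to __irq_domain_alloc_irqs().
 */
static int demo_prepare_msi(struct device *dev, unsigned int nvec)
{
	struct cpumask *mask = irq_create_affinity_mask(&nvec);
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev)
		desc->affinity = mask;	/* NULL means "no spreading" */

	return nvec;
}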
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@@ -311,7 +311,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 	    !name_unique(irq, action))
 		return;
 
-	memset(name, 0, MAX_NAMELEN);
 	snprintf(name, MAX_NAMELEN, "%s", action->name);
 
 	/* create /proc/irq/1234/handler/ */
@@ -340,7 +339,6 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 	if (desc->dir)
 		goto out_unlock;
 
-	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%d", irq);
 
 	/* create /proc/irq/1234 */
@@ -386,7 +384,6 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
 #endif
 	remove_proc_entry("spurious", desc->dir);
 
-	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%u", irq);
 	remove_proc_entry(name, root_irq_dir);
 }
@@ -421,12 +418,8 @@ void init_irq_proc(void)
 	/*
 	 * Create entries for all existing IRQs.
 	 */
-	for_each_irq_desc(irq, desc) {
-		if (!desc)
-			continue;
-
+	for_each_irq_desc(irq, desc)
 		register_irq_proc(irq, desc);
-	}
 }
 
 #ifdef CONFIG_GENERIC_IRQ_SHOW