Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The interrupt departement delivers this time:

   - New infrastructure to manage NMIs on platforms which have a sane
     NMI delivery, i.e. identifiable NMI vectors instead of a single lump.

   - Simplification of the interrupt affinity management so drivers don't
     have to implement ugly loops around the PCI/MSI enablement.

   - Speedup for interrupt statistics in /proc/stat

   - Provide a function to retrieve the default irq domain

   - A new interrupt controller for the Loongson LS1X platform

   - Affinity support for the SiFive PLIC

   - Better support for the iMX irqsteer driver

   - NUMA aware memory allocations for GICv3

   - The usual small fixes, improvements and cleanups all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  irqchip/imx-irqsteer: Add multi output interrupts support
  irqchip/imx-irqsteer: Change to use reg_num instead of irq_group
  dt-bindings: irq: imx-irqsteer: Add multi output interrupts support
  dt-binding: irq: imx-irqsteer: Use irq number instead of group number
  irqchip/brcmstb-l2: Use _irqsave locking variants in non-interrupt code
  irqchip/gicv3-its: Use NUMA aware memory allocation for ITS tables
  irqdomain: Allow the default irq domain to be retrieved
  irqchip/sifive-plic: Implement irq_set_affinity() for SMP host
  irqchip/sifive-plic: Differentiate between PLIC handler and context
  irqchip/sifive-plic: Add warning in plic_init() if handler already present
  irqchip/sifive-plic: Pre-compute context hart base and enable base
  PCI/MSI: Remove obsolete sanity checks for multiple interrupt sets
  genirq/affinity: Remove the leftovers of the original set support
  nvme-pci: Simplify interrupt allocation
  genirq/affinity: Add new callback for (re)calculating interrupt sets
  genirq/affinity: Store interrupt sets size in struct irq_affinity
  genirq/affinity: Code consolidation
  irqchip/irq-sifive-plic: Check and continue in case of an invalid cpuid.
  irqchip/i8259: Fix shutdown order by moving syscore_ops registration
  dt-bindings: interrupt-controller: loongson ls1x intc
  ...
@@ -9,7 +9,7 @@
#include <linux/cpu.h>

static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
int cpus_per_vec)
unsigned int cpus_per_vec)
{
const struct cpumask *siblmsk;
int cpu, sibl;
@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
}

static int __irq_build_affinity_masks(const struct irq_affinity *affd,
int startvec, int numvecs, int firstvec,
unsigned int startvec,
unsigned int numvecs,
unsigned int firstvec,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk,
struct irq_affinity_desc *masks)
{
int n, nodes, cpus_per_vec, extra_vecs, done = 0;
int last_affv = firstvec + numvecs;
int curvec = startvec;
unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
unsigned int last_affv = firstvec + numvecs;
unsigned int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;

if (!cpumask_weight(cpu_mask))
@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
cpumask_or(&masks[curvec].mask,
&masks[curvec].mask,
node_to_cpumask[n]);
cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
node_to_cpumask[n]);
if (++curvec == last_affv)
curvec = firstvec;
}
done = numvecs;
goto out;
return numvecs;
}

for_each_node_mask(n, nodemsk) {
int ncpus, v, vecs_to_assign, vecs_per_node;
unsigned int ncpus, v, vecs_to_assign, vecs_per_node;

/* Spread the vectors per node */
vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
curvec = firstvec;
--nodes;
}

out:
return done;
}
@@ -174,19 +172,24 @@ out:
* 2) spread other possible CPUs on these vectors
*/
static int irq_build_affinity_masks(const struct irq_affinity *affd,
int startvec, int numvecs, int firstvec,
cpumask_var_t *node_to_cpumask,
unsigned int startvec, unsigned int numvecs,
unsigned int firstvec,
struct irq_affinity_desc *masks)
{
int curvec = startvec, nr_present, nr_others;
int ret = -ENOMEM;
unsigned int curvec = startvec, nr_present, nr_others;
cpumask_var_t *node_to_cpumask;
cpumask_var_t nmsk, npresmsk;
int ret = -ENOMEM;

if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return ret;

if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
goto fail;
goto fail_nmsk;

node_to_cpumask = alloc_node_to_cpumask();
if (!node_to_cpumask)
goto fail_npresmsk;

ret = 0;
/* Stabilize the cpumasks */
@@ -217,13 +220,22 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
if (nr_present < numvecs)
WARN_ON(nr_present + nr_others < numvecs);

free_node_to_cpumask(node_to_cpumask);

fail_npresmsk:
free_cpumask_var(npresmsk);

fail:
fail_nmsk:
free_cpumask_var(nmsk);
return ret;
}

static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
affd->nr_sets = 1;
affd->set_size[0] = affvecs;
}

/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
@@ -232,50 +244,62 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
* Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
struct irq_affinity_desc *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
int curvec, usedvecs;
cpumask_var_t *node_to_cpumask;
unsigned int affvecs, curvec, usedvecs, i;
struct irq_affinity_desc *masks = NULL;
int i, nr_sets;

/*
* If there aren't any vectors left after applying the pre/post
* vectors don't bother with assigning affinity.
* Determine the number of vectors which need interrupt affinities
* assigned. If the pre/post request exhausts the available vectors
* then nothing to do here except for invoking the calc_sets()
* callback so the device driver can adjust to the situation. If there
* is only a single vector, then managing the queue is pointless as
* well.
*/
if (nvecs == affd->pre_vectors + affd->post_vectors)
if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
else
affvecs = 0;

/*
* Simple invocations do not provide a calc_sets() callback. Install
* the generic one.
*/
if (!affd->calc_sets)
affd->calc_sets = default_calc_sets;

/* Recalculate the sets */
affd->calc_sets(affd, affvecs);

if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
return NULL;

node_to_cpumask = alloc_node_to_cpumask();
if (!node_to_cpumask)
/* Nothing to assign? */
if (!affvecs)
return NULL;

masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
goto outnodemsk;
return NULL;

/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity);

/*
* Spread on present CPUs starting from affd->pre_vectors. If we
* have multiple sets, build each sets affinity mask separately.
*/
nr_sets = affd->nr_sets;
if (!nr_sets)
nr_sets = 1;

for (i = 0, usedvecs = 0; i < nr_sets; i++) {
int this_vecs = affd->sets ? affd->sets[i] : affvecs;
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
int ret;

ret = irq_build_affinity_masks(affd, curvec, this_vecs,
curvec, node_to_cpumask, masks);
curvec, masks);
if (ret) {
kfree(masks);
masks = NULL;
goto outnodemsk;
return NULL;
}
curvec += this_vecs;
usedvecs += this_vecs;
@@ -293,8 +317,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
masks[i].is_managed = 1;

outnodemsk:
free_node_to_cpumask(node_to_cpumask);
return masks;
}
@@ -304,25 +326,22 @@ outnodemsk:
* @maxvec: The maximum number of vectors available
* @affd: Description of the affinity requirements
*/
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
const struct irq_affinity *affd)
{
int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv;
int set_vecs;
unsigned int resv = affd->pre_vectors + affd->post_vectors;
unsigned int set_vecs;

if (resv > minvec)
return 0;

if (affd->nr_sets) {
int i;

for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
set_vecs += affd->sets[i];
if (affd->calc_sets) {
set_vecs = maxvec - resv;
} else {
get_online_cpus();
set_vecs = cpumask_weight(cpu_possible_mask);
put_online_cpus();
}

return resv + min(set_vecs, vecs);
return resv + min(set_vecs, maxvec - resv);
}
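The hunks above replace the old affd->sets[] array with a driver-supplied calc_sets() callback plus nr_sets/set_size[]. Below is a minimal sketch of how a PCI driver might use the reworked interface; the foo_* names, the single pre-vector and the 3:1 read/write split are illustrative assumptions, not something this series defines.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Recalculated by the core whenever it has to retry with fewer vectors. */
static void foo_calc_irq_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	if (nvecs < 2) {
		affd->nr_sets = 1;
		affd->set_size[0] = nvecs;
		return;
	}
	affd->nr_sets = 2;
	affd->set_size[0] = (nvecs * 3) / 4;		/* e.g. read queues */
	affd->set_size[1] = nvecs - affd->set_size[0];	/* e.g. write queues */
}

static int foo_setup_irqs(struct pci_dev *pdev, unsigned int max_vecs)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,			/* admin vector, not spread */
		.calc_sets	= foo_calc_irq_sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}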
@@ -729,6 +729,37 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
|
||||
|
||||
/**
|
||||
* handle_fasteoi_nmi - irq handler for NMI interrupt lines
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* A simple NMI-safe handler, considering the restrictions
|
||||
* from request_nmi.
|
||||
*
|
||||
* Only a single callback will be issued to the chip: an ->eoi()
|
||||
* call when the interrupt has been serviced. This enables support
|
||||
* for modern forms of interrupt handlers, which handle the flow
|
||||
* details in hardware, transparently.
|
||||
*/
|
||||
void handle_fasteoi_nmi(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct irqaction *action = desc->action;
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
irqreturn_t res;
|
||||
|
||||
trace_irq_handler_entry(irq, action);
|
||||
/*
|
||||
* NMIs cannot be shared, there is only one action.
|
||||
*/
|
||||
res = action->handler(irq, action->dev_id);
|
||||
trace_irq_handler_exit(irq, action, res);
|
||||
|
||||
if (chip->irq_eoi)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
|
||||
|
||||
/**
|
||||
* handle_edge_irq - edge type IRQ handler
|
||||
* @desc: the interrupt description structure for this irq
|
||||
@@ -855,7 +886,11 @@ void handle_percpu_irq(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
kstat_incr_irqs_this_cpu(desc);
|
||||
/*
|
||||
* PER CPU interrupts are not serialized. Do not touch
|
||||
* desc->tot_count.
|
||||
*/
|
||||
__kstat_incr_irqs_this_cpu(desc);
|
||||
|
||||
if (chip->irq_ack)
|
||||
chip->irq_ack(&desc->irq_data);
|
||||
@@ -884,7 +919,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
irqreturn_t res;
|
||||
|
||||
kstat_incr_irqs_this_cpu(desc);
|
||||
/*
|
||||
* PER CPU interrupts are not serialized. Do not touch
|
||||
* desc->tot_count.
|
||||
*/
|
||||
__kstat_incr_irqs_this_cpu(desc);
|
||||
|
||||
if (chip->irq_ack)
|
||||
chip->irq_ack(&desc->irq_data);
|
||||
@@ -908,6 +947,29 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
|
||||
* dev ids
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Similar to handle_fasteoi_nmi, but handling the dev_id cookie
|
||||
* as a percpu pointer.
|
||||
*/
|
||||
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct irqaction *action = desc->action;
|
||||
unsigned int irq = irq_desc_get_irq(desc);
|
||||
irqreturn_t res;
|
||||
|
||||
trace_irq_handler_entry(irq, action);
|
||||
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
|
||||
trace_irq_handler_exit(irq, action, res);
|
||||
|
||||
if (chip->irq_eoi)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
static void
|
||||
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
|
||||
int is_chained, const char *name)
|
||||
|
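The NMI flow handlers above are only reachable once an irqchip opts into NMI delivery. A hedged sketch of what that opt-in looks like, based on the IRQCHIP_SUPPORTS_NMI flag and the irq_nmi_setup()/irq_nmi_teardown() chip callbacks checked later in manage.c; the foo chip, its EOI callback and the "raise priority" approach are made up for illustration.

static void foo_eoi(struct irq_data *d)
{
	/* write the chip's EOI register here (chip specific, assumed) */
}

static int foo_irq_nmi_setup(struct irq_data *d)
{
	/*
	 * e.g. raise the line's priority so the CPU takes it as an NMI,
	 * then switch the flow handler to the NMI-safe variant.
	 */
	irq_set_handler_locked(d, handle_fasteoi_nmi);
	return 0;
}

static void foo_irq_nmi_teardown(struct irq_data *d)
{
	irq_set_handler_locked(d, handle_fasteoi_irq);
}

static struct irq_chip foo_chip = {
	.name			= "foo",
	.irq_eoi		= foo_eoi,
	.irq_nmi_setup		= foo_irq_nmi_setup,
	.irq_nmi_teardown	= foo_irq_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};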
@@ -56,6 +56,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
};

static void
@@ -140,6 +141,7 @@ static const struct irq_bit_descr irqdesc_istates[] = {
BIT_MASK_DESCR(IRQS_WAITING),
BIT_MASK_DESCR(IRQS_PENDING),
BIT_MASK_DESCR(IRQS_SUSPENDED),
BIT_MASK_DESCR(IRQS_NMI),
};


@@ -203,8 +205,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);

if (irq_settings_is_level(desc)) {
/* Can't do level, sorry */
if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
/* Can't do level nor NMIs, sorry */
err = -EINVAL;
} else {
desc->istate |= IRQS_PENDING;
@@ -256,8 +258,6 @@ static int __init irq_debugfs_init(void)
int irq;

root_dir = debugfs_create_dir("irq", NULL);
if (!root_dir)
return -ENOMEM;

irq_domain_debugfs_init(root_dir);
@@ -166,7 +166,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags

__irq_wake_thread(desc, action);

/* Fall through to add to randomness */
/* Fall through - to add to randomness */
case IRQ_HANDLED:
*flags |= action->flags;
break;
@@ -49,6 +49,7 @@ enum {
* IRQS_WAITING - irq is waiting
* IRQS_PENDING - irq is pending and replayed later
* IRQS_SUSPENDED - irq is suspended
* IRQS_NMI - irq line is used to deliver NMIs
*/
enum {
IRQS_AUTODETECT = 0x00000001,
@@ -60,6 +61,7 @@ enum {
IRQS_PENDING = 0x00000200,
IRQS_SUSPENDED = 0x00000800,
IRQS_TIMINGS = 0x00001000,
IRQS_NMI = 0x00002000,
};

#include "debug.h"
@@ -242,12 +244,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)

#undef __irqd_to_state

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
__this_cpu_inc(*desc->kstat_irqs);
__this_cpu_inc(kstat.irqs_sum);
}

static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
__kstat_incr_irqs_this_cpu(desc);
desc->tot_count++;
}

static inline int irq_desc_get_node(struct irq_desc *desc)
{
return irq_common_data_get_node(&desc->irq_common_data);
@@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
desc->depth = 1;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
desc->tot_count = 0;
desc->name = NULL;
desc->owner = owner;
for_each_possible_cpu(cpu)
@@ -669,6 +670,41 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
set_irq_regs(old_regs);
return ret;
}

#ifdef CONFIG_IRQ_DOMAIN
/**
* handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
* @domain: The domain where to perform the lookup
* @hwirq: The HW irq number to convert to a logical one
* @regs: Register file coming from the low-level handling code
*
* Returns: 0 on success, or -EINVAL if conversion has failed
*/
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned int irq;
int ret = 0;

nmi_enter();

irq = irq_find_mapping(domain, hwirq);

/*
* ack_bad_irq is not NMI-safe, just report
* an invalid interrupt.
*/
if (likely(irq))
generic_handle_irq(irq);
else
ret = -EINVAL;

nmi_exit();
set_irq_regs(old_regs);
return ret;
}
#endif
#endif

/* Dynamic interrupt handling */
@@ -919,11 +955,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
int cpu;
unsigned int sum = 0;
int cpu;

if (!desc || !desc->kstat_irqs)
return 0;
if (!irq_settings_is_per_cpu_devid(desc) &&
!irq_settings_is_per_cpu(desc))
return desc->tot_count;

for_each_possible_cpu(cpu)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
return sum;
@@ -458,6 +458,20 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

/**
* irq_get_default_host() - Retrieve the "default" irq domain
*
* Returns: the default domain, if any.
*
* Modern code should never use this. This should only be used on
* systems that cannot implement a firmware->fwnode mapping (which
* both DT and ACPI provide).
*/
struct irq_domain *irq_get_default_host(void)
{
return irq_default_domain;
}

static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
@@ -1749,8 +1763,6 @@ void __init irq_domain_debugfs_init(struct dentry *root)
struct irq_domain *d;

domain_dir = debugfs_create_dir("domains", root);
if (!domain_dir)
return;

debugfs_create_file("default", 0444, domain_dir, NULL,
&irq_domain_debug_fops);
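For completeness, a sketch of the kind of legacy platform code the new irq_get_default_host() accessor is aimed at; the dispatch function, its name and the hwirq source are hypothetical.

/* Low-level entry point on a platform without a usable fwnode mapping. */
static void foo_dispatch_irq(unsigned int hwirq, struct pt_regs *regs)
{
	struct irq_domain *d = irq_get_default_host();

	/* Translate the hardware irq through the default domain and handle it. */
	if (d)
		handle_domain_irq(d, hwirq, regs);
}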
@@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
/* The release function is promised process context */
might_sleep();

if (!desc)
if (!desc || desc->istate & IRQS_NMI)
return -EINVAL;

/* Complete initialisation of *notify */
@@ -553,6 +553,21 @@ bool disable_hardirq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_hardirq);

/**
* disable_nmi_nosync - disable an nmi without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables and enables are
* nested.
* The interrupt to disable must have been requested through request_nmi.
* Unlike disable_nmi(), this function does not ensure existing
* instances of the IRQ handler have completed before returning.
*/
void disable_nmi_nosync(unsigned int irq)
{
disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
switch (desc->depth) {
@@ -609,6 +624,20 @@ out:
}
EXPORT_SYMBOL(enable_irq);

/**
* enable_nmi - enable handling of an nmi
* @irq: Interrupt to enable
*
* The interrupt to enable must have been requested through request_nmi.
* Undoes the effect of one call to disable_nmi(). If this
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*/
void enable_nmi(unsigned int irq)
{
enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -644,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
if (!desc)
return -EINVAL;

/* Don't use NMIs as wake up interrupts please */
if (desc->istate & IRQS_NMI) {
ret = -EINVAL;
goto out_unlock;
}

/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
@@ -666,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
}
}

out_unlock:
irq_put_desc_busunlock(desc, flags);
return ret;
}
@@ -726,6 +763,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
case IRQ_SET_MASK_OK_DONE:
irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
irqd_set(&desc->irq_data, flags);
/* fall through */

case IRQ_SET_MASK_OK_NOCOPY:
flags = irqd_get_trigger_type(&desc->irq_data);
@@ -1128,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc)
c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* Only IRQs directly managed by the root irqchip can be set as NMI */
if (d->parent_data)
return false;
#endif
/* Don't support NMIs for chips behind a slow bus */
if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
return false;

return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
struct irq_chip *c = d->chip;

return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
struct irq_chip *c = d->chip;

if (c->irq_nmi_teardown)
c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
@@ -1302,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* fields must have IRQF_SHARED set and the bits which
* set the trigger type must match. Also all must
* agree on ONESHOT.
* Interrupt lines used for NMIs cannot be shared.
*/
unsigned int oldtype;

if (desc->istate & IRQS_NMI) {
pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
new->name, irq, desc->irq_data.chip->name);
ret = -EINVAL;
goto out_unlock;
}

/*
* If nobody did set the configuration before, inherit
* the one provided by the requester.
@@ -1756,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL(free_irq);

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
const char *devname = NULL;

desc->istate &= ~IRQS_NMI;

if (!WARN_ON(desc->action == NULL)) {
irq_pm_remove_action(desc, desc->action);
devname = desc->action->name;
unregister_handler_proc(irq, desc->action);

kfree(desc->action);
desc->action = NULL;
}

irq_settings_clr_disable_unlazy(desc);
irq_shutdown(desc);

irq_release_resources(desc);

irq_chip_pm_put(&desc->irq_data);
module_put(desc->owner);

return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
const void *devname;

if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
return NULL;

if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return NULL;

/* NMI still enabled */
if (WARN_ON(desc->depth == 0))
disable_nmi_nosync(irq);

raw_spin_lock_irqsave(&desc->lock, flags);

irq_nmi_teardown(desc);
devname = __cleanup_nmi(irq, desc);

raw_spin_unlock_irqrestore(&desc->lock, flags);

return devname;
}

/**
* request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
@@ -1925,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

/**
* request_nmi - allocate an interrupt line for NMI delivery
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Threaded handler for threaded interrupts.
* @irqflags: Interrupt type flags
* @name: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. It sets up the IRQ line
* to be handled as an NMI.
*
* An interrupt line delivering NMIs cannot be shared and IRQ handling
* cannot be threaded.
*
* Interrupt lines requested for NMI delivering must produce per cpu
* interrupts and have auto enabling setting disabled.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
* If the interrupt line cannot be used to deliver NMIs, function
* will fail and return a negative value.
*/
int request_nmi(unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *name, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
unsigned long flags;
int retval;

if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;

/* NMI cannot be shared, used for Polling */
if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
return -EINVAL;

if (!(irqflags & IRQF_PERCPU))
return -EINVAL;

if (!handler)
return -EINVAL;

desc = irq_to_desc(irq);

if (!desc || irq_settings_can_autoenable(desc) ||
!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
!irq_supports_nmi(desc))
return -EINVAL;

action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;

action->handler = handler;
action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
action->name = name;
action->dev_id = dev_id;

retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;

retval = __setup_irq(irq, desc, action);
if (retval)
goto err_irq_setup;

raw_spin_lock_irqsave(&desc->lock, flags);

/* Setup NMI state */
desc->istate |= IRQS_NMI;
retval = irq_nmi_setup(desc);
if (retval) {
__cleanup_nmi(irq, desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return -EINVAL;
}

raw_spin_unlock_irqrestore(&desc->lock, flags);

return 0;

err_irq_setup:
irq_chip_pm_put(&desc->irq_data);
err_out:
kfree(action);

return retval;
}

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
unsigned int cpu = smp_processor_id();
@@ -1959,6 +2186,11 @@ out:
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
enable_percpu_irq(irq, type);
}

/**
* irq_percpu_is_enabled - Check whether the per cpu irq is enabled
* @irq: Linux irq number to check for
@@ -1998,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq)
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
disable_percpu_irq(irq);
}

/*
* Internal function to unregister a percpu irqaction.
*/
@@ -2029,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
/* Found it - now remove it from the list of entries: */
desc->action = NULL;

desc->istate &= ~IRQS_NMI;

raw_spin_unlock_irqrestore(&desc->lock, flags);

unregister_handler_proc(irq, action);
@@ -2082,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);

if (!desc || !irq_settings_is_per_cpu_devid(desc))
return;

if (WARN_ON(!(desc->istate & IRQS_NMI)))
return;

kfree(__free_percpu_irq(irq, dev_id));
}

/**
* setup_percpu_irq - setup a per-cpu interrupt
* @irq: Interrupt line to setup
@@ -2171,6 +2423,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);

/**
* request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @name: An ascii name for the claiming device
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
* have to be setup on each CPU by calling prepare_percpu_nmi() before
* being enabled on the same CPU by using enable_percpu_nmi().
*
* Dev_id must be globally unique. It is a per-cpu variable, and
* the handler gets called with the interrupted CPU's instance of
* that variable.
*
* Interrupt lines requested for NMI delivering should have auto enabling
* setting disabled.
*
* If the interrupt line cannot be used to deliver NMIs, function
* will fail returning a negative value.
*/
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
const char *name, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
unsigned long flags;
int retval;

if (!handler)
return -EINVAL;

desc = irq_to_desc(irq);

if (!desc || !irq_settings_can_request(desc) ||
!irq_settings_is_per_cpu_devid(desc) ||
irq_settings_can_autoenable(desc) ||
!irq_supports_nmi(desc))
return -EINVAL;

/* The line cannot already be NMI */
if (desc->istate & IRQS_NMI)
return -EINVAL;

action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;

action->handler = handler;
action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
| IRQF_NOBALANCING;
action->name = name;
action->percpu_dev_id = dev_id;

retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;

retval = __setup_irq(irq, desc, action);
if (retval)
goto err_irq_setup;

raw_spin_lock_irqsave(&desc->lock, flags);
desc->istate |= IRQS_NMI;
raw_spin_unlock_irqrestore(&desc->lock, flags);

return 0;

err_irq_setup:
irq_chip_pm_put(&desc->irq_data);
err_out:
kfree(action);

return retval;
}

/**
* prepare_percpu_nmi - performs CPU local setup for NMI delivery
* @irq: Interrupt line to prepare for NMI delivery
*
* This call prepares an interrupt line to deliver NMI on the current CPU,
* before that interrupt line gets enabled with enable_percpu_nmi().
*
* As a CPU local operation, this should be called from non-preemptible
* context.
*
* If the interrupt line cannot be used to deliver NMIs, function
* will fail returning a negative value.
*/
int prepare_percpu_nmi(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc;
int ret = 0;

WARN_ON(preemptible());

desc = irq_get_desc_lock(irq, &flags,
IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return -EINVAL;

if (WARN(!(desc->istate & IRQS_NMI),
KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
irq)) {
ret = -EINVAL;
goto out;
}

ret = irq_nmi_setup(desc);
if (ret) {
pr_err("Failed to setup NMI delivery: irq %u\n", irq);
goto out;
}

out:
irq_put_desc_unlock(desc, flags);
return ret;
}

/**
* teardown_percpu_nmi - undoes NMI setup of IRQ line
* @irq: Interrupt line from which CPU local NMI configuration should be
* removed
*
* This call undoes the setup done by prepare_percpu_nmi().
*
* IRQ line should not be enabled for the current CPU.
*
* As a CPU local operation, this should be called from non-preemptible
* context.
*/
void teardown_percpu_nmi(unsigned int irq)
{
unsigned long flags;
struct irq_desc *desc;

WARN_ON(preemptible());

desc = irq_get_desc_lock(irq, &flags,
IRQ_GET_DESC_CHECK_PERCPU);
if (!desc)
return;

if (WARN_ON(!(desc->istate & IRQS_NMI)))
goto out;

irq_nmi_teardown(desc);
out:
irq_put_desc_unlock(desc, flags);
}

/**
* irq_get_irqchip_state - returns the irqchip state of a interrupt.
* @irq: Interrupt line that is forwarded to a VM
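Putting the new manage.c entry points together, here is a rough usage sketch of the per-CPU NMI flow (request once, then prepare and enable on each CPU). The foo driver, its per-CPU counter and the IRQ_TYPE_NONE trigger are assumptions for illustration, and error unwinding is trimmed.

static DEFINE_PER_CPU(unsigned long, foo_nmi_count);

static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
{
	unsigned long *count = dev_id;	/* this CPU's instance of foo_nmi_count */

	/* NMI context: no locking, no sleeping, keep it minimal. */
	(*count)++;
	return IRQ_HANDLED;
}

static int foo_setup_cpu_nmi(unsigned int irq)
{
	int ret;

	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi",
				 &foo_nmi_count);
	if (ret)
		return ret;

	/* Then, on each CPU and from non-preemptible context: */
	ret = prepare_percpu_nmi(irq);
	if (!ret)
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return ret;
}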
@@ -101,6 +101,12 @@ bool kthread_should_stop(void)
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
* kthread_should_park - should this kthread park now?
*
@@ -114,7 +120,7 @@ EXPORT_SYMBOL(kthread_should_stop);
*/
bool kthread_should_park(void)
{
return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
@@ -89,7 +89,8 @@ static bool ksoftirqd_running(unsigned long pending)

if (pending & SOFTIRQ_NOW_MASK)
return false;
return tsk && (tsk->state == TASK_RUNNING);
return tsk && (tsk->state == TASK_RUNNING) &&
!__kthread_should_park(tsk);
}

/*