Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "The usual pile of boring changes:

   - Consolidate tasklet functions to share code instead of duplicating
     it

   - The first step for making the low-level entry handler management on
     multi-platform kernels generic (see the sketch after this list)

   - A new sysfs file which allows retrieving the wakeup state of
     interrupts.

   - Ensure that the interrupt thread follows the effective affinity and
     not the programmed affinity to avoid cross-core wakeups.

   - Two new interrupt controller drivers (Microsemi Ocelot and Qualcomm
     PDC)

   - Fix the wakeup path clock handling for Renesas interrupt chips.

   - Rework the boot time register reset for ARM GIC-V2/3

   - Better suspend/resume support for ARM GIC-V3/ITS

   - Add missing locking to the ARM GIC set_type() callback

   - Small fixes for the irq simulator code

   - SPDX identifiers for the irq core code and removal of boilerplate

   - Small cleanups all over the place"
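A rough illustration of the multi-platform entry handler item above (a
minimal sketch, not code from this merge: the driver, compatible string and
domain are hypothetical; set_handle_irq(), handle_domain_irq() and
IRQCHIP_DECLARE() are the existing kernel APIs that
CONFIG_GENERIC_IRQ_MULTI_HANDLER makes available generically):

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *my_domain;	/* hypothetical root domain */

/* Root handler: read the pending hwirq from the chip and dispatch it. */
static void my_chip_handle_irq(struct pt_regs *regs)
{
	unsigned int hwirq = 0;		/* would be read from the hardware */

	handle_domain_irq(my_domain, hwirq, regs);
}

static int __init my_chip_init(struct device_node *node,
			       struct device_node *parent)
{
	/* ... map registers and create my_domain here ... */

	/* Generic replacement for the per-arch MULTI_IRQ_HANDLER hook */
	set_handle_irq(my_chip_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(my_chip, "vendor,my-chip", my_chip_init);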

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  openrisc: Set CONFIG_MULTI_IRQ_HANDLER
  arm64: Set CONFIG_MULTI_IRQ_HANDLER
  genirq: Make GENERIC_IRQ_MULTI_HANDLER depend on !MULTI_IRQ_HANDLER
  irqchip/gic: Take lock when updating irq type
  irqchip/gic: Update supports_deactivate static key to modern api
  irqchip/gic-v3: Ensure GICR_CTLR.EnableLPI=0 is observed before enabling
  irqchip: Add a driver for the Microsemi Ocelot controller
  dt-bindings: interrupt-controller: Add binding for the Microsemi Ocelot interrupt controller
  irqchip/gic-v3: Probe for SCR_EL3 being clear before resetting AP0Rn
  irqchip/gic-v3: Don't try to reset AP0Rn
  irqchip/gic-v3: Do not check trigger configuration of partitionned LPIs
  genirq: Remove license boilerplate/references
  genirq: Add missing SPDX identifiers
  genirq/matrix: Cleanup SPDX identifier
  genirq: Cleanup top of file comments
  genirq: Pass desc to __irq_free instead of irq number
  irqchip/gic-v3: Loudly complain about the use of IRQ_TYPE_NONE
  irqchip/gic: Loudly complain about the use of IRQ_TYPE_NONE
  RISC-V: Move to the new GENERIC_IRQ_MULTI_HANDLER handler
  genirq: Add CONFIG_GENERIC_IRQ_MULTI_HANDLER
  ...
Committed by Linus Torvalds, 2018-04-04 15:19:26 -07:00
46 changed files with 1113 additions and 278 deletions

drivers/irqchip/Kconfig

@@ -286,6 +286,11 @@ config IRQ_MXS
select IRQ_DOMAIN
select STMP_DEVICE
config MSCC_OCELOT_IRQ
bool
select IRQ_DOMAIN
select GENERIC_IRQ_CHIP
config MVEBU_GICP
bool
@@ -351,4 +356,13 @@ config GOLDFISH_PIC
Say yes here to enable Goldfish interrupt controller driver used
for Goldfish based virtual platforms.
config QCOM_PDC
bool "QCOM PDC"
depends on ARCH_QCOM
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
help
Power Domain Controller driver to manage and configure wakeup
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
endmenu

drivers/irqchip/Makefile

@@ -70,6 +70,7 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MSCC_OCELOT_IRQ) += irq-mscc-ocelot.o
obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o
obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
@@ -84,3 +85,4 @@ obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o

drivers/irqchip/irq-gic-common.c

@@ -21,6 +21,8 @@
#include "irq-gic-common.h"
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static const struct gic_kvm_info *gic_kvm_info;
const struct gic_kvm_info *gic_get_kvm_info(void)
@@ -53,11 +55,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
u32 confoff = (irq / 16) * 4;
u32 val, oldval;
int ret = 0;
unsigned long flags;
/*
* Read current configuration register, and insert the config
* for "irq", depending on "type".
*/
raw_spin_lock_irqsave(&irq_controller_lock, flags);
val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
if (type & IRQ_TYPE_LEVEL_MASK)
val &= ~confmask;
@@ -65,8 +69,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
val |= confmask;
/* If the current configuration is the same, then we are done */
if (val == oldval)
if (val == oldval) {
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
return 0;
}
/*
* Write back the new configuration, and possibly re-enable
@@ -84,6 +90,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
pr_warn("GIC: PPI%d is secure or misconfigured\n",
irq - 16);
}
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
if (sync_access)
sync_access();

drivers/irqchip/irq-gic-v3-its.c

@@ -33,6 +33,7 @@
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -46,6 +47,7 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -101,6 +103,8 @@ struct its_node {
struct its_collection *collections;
struct fwnode_handle *fwnode_handle;
u64 (*get_msi_base)(struct its_device *its_dev);
u64 cbaser_save;
u32 ctlr_save;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
@@ -1875,16 +1879,6 @@ static void its_cpu_init_lpis(void)
gic_data_rdist()->pend_page = pend_page;
}
/* Disable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
val &= ~GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/*
* Make sure any change to the table is observable by the GIC.
*/
dsb(sy);
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
GICR_PROPBASER_InnerShareable |
@@ -1938,53 +1932,54 @@ static void its_cpu_init_lpis(void)
dsb(sy);
}
static void its_cpu_init_collection(void)
static void its_cpu_init_collection(struct its_node *its)
{
struct its_node *its;
int cpu;
int cpu = smp_processor_id();
u64 target;
spin_lock(&its_lock);
cpu = smp_processor_id();
/* avoid cross node collections and its mapping */
if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
struct device_node *cpu_node;
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
/* avoid cross node collections and its mapping */
if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
struct device_node *cpu_node;
cpu_node = of_get_cpu_node(cpu, NULL);
if (its->numa_node != NUMA_NO_NODE &&
its->numa_node != of_node_to_nid(cpu_node))
continue;
}
cpu_node = of_get_cpu_node(cpu, NULL);
if (its->numa_node != NUMA_NO_NODE &&
its->numa_node != of_node_to_nid(cpu_node))
return;
}
/*
* We now have to bind each collection to its target
* redistributor.
*/
if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* We now have to bind each collection to its target
* This ITS wants the physical address of the
* redistributor.
*/
if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
*/
target = gic_data_rdist()->phys_base;
} else {
/*
* This ITS wants a linear CPU number.
*/
target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
its->collections[cpu].target_address = target;
its->collections[cpu].col_id = cpu;
its_send_mapc(its, &its->collections[cpu], 1);
its_send_invall(its, &its->collections[cpu]);
target = gic_data_rdist()->phys_base;
} else {
/* This ITS wants a linear CPU number. */
target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
its->collections[cpu].target_address = target;
its->collections[cpu].col_id = cpu;
its_send_mapc(its, &its->collections[cpu], 1);
its_send_invall(its, &its->collections[cpu]);
}
static void its_cpu_init_collections(void)
{
struct its_node *its;
spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry)
its_cpu_init_collection(its);
spin_unlock(&its_lock);
}
@@ -3041,6 +3036,113 @@ static void its_enable_quirks(struct its_node *its)
gic_enable_quirks(iidr, its_quirks, its);
}
static int its_save_disable(void)
{
struct its_node *its;
int err = 0;
spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
continue;
base = its->base;
its->ctlr_save = readl_relaxed(base + GITS_CTLR);
err = its_force_quiescent(base);
if (err) {
pr_err("ITS@%pa: failed to quiesce: %d\n",
&its->phys_base, err);
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
goto err;
}
its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
}
err:
if (err) {
list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
void __iomem *base;
if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
continue;
base = its->base;
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
}
}
spin_unlock(&its_lock);
return err;
}
static void its_restore_enable(void)
{
struct its_node *its;
int ret;
spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
void __iomem *base;
int i;
if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
continue;
base = its->base;
/*
* Make sure that the ITS is disabled. If it fails to quiesce,
* don't restore it since writing to CBASER or BASER<n>
* registers is undefined according to the GIC v3 ITS
* Specification.
*/
ret = its_force_quiescent(base);
if (ret) {
pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
&its->phys_base, ret);
continue;
}
gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
/*
* Writing CBASER resets CREADR to 0, so make CWRITER and
* cmd_write line up with it.
*/
its->cmd_write = its->cmd_base;
gits_write_cwriter(0, base + GITS_CWRITER);
/* Restore GITS_BASER from the value cache. */
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = &its->tables[i];
if (!(baser->val & GITS_BASER_VALID))
continue;
its_write_baser(its, baser, baser->val);
}
writel_relaxed(its->ctlr_save, base + GITS_CTLR);
/*
* Reinit the collection if it's stored in the ITS. This is
* indicated by the col_id being less than the HCC field.
* CID < HCC as specified in the GIC v3 Documentation.
*/
if (its->collections[smp_processor_id()].col_id <
GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
its_cpu_init_collection(its);
}
spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
.suspend = its_save_disable,
.resume = its_restore_enable,
};
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
struct irq_domain *inner_domain;
@@ -3260,6 +3362,9 @@ static int __init its_probe_one(struct resource *res,
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
if (GITS_TYPER_HCC(typer))
its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
err = its_init_domain(handle, its);
if (err)
goto out_free_tables;
@@ -3287,15 +3392,71 @@ static bool gic_rdists_supports_plpis(void)
return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static int redist_disable_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
u64 timeout = USEC_PER_SEC;
u64 val;
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
}
val = readl_relaxed(rbase + GICR_CTLR);
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
/* Disable LPIs */
val &= ~GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/* Make sure any change to GICR_CTLR is observable by the GIC */
dsb(sy);
/*
* Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
* from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
* Error out if we time out waiting for RWP to clear.
*/
while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
if (!timeout) {
pr_err("CPU%d: Timeout while disabling LPIs\n",
smp_processor_id());
return -ETIMEDOUT;
}
udelay(1);
timeout--;
}
/*
* After it has been written to 1, it is IMPLEMENTATION
* DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
* cleared to 0. Error out if clearing the bit failed.
*/
if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
return -EBUSY;
}
return 0;
}
int its_cpu_init(void)
{
if (!list_empty(&its_nodes)) {
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
}
int ret;
ret = redist_disable_lpis();
if (ret)
return ret;
its_cpu_init_lpis();
its_cpu_init_collection();
its_cpu_init_collections();
}
return 0;
@@ -3516,5 +3677,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
}
register_syscore_ops(&its_syscore_ops);
return 0;
}

drivers/irqchip/irq-gic-v3.c

@@ -61,7 +61,7 @@ struct gic_chip_data {
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);
@@ -354,7 +354,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
else
isb();
@@ -362,7 +362,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
if (static_key_true(&supports_deactivate)) {
if (static_branch_likely(&supports_deactivate_key)) {
if (irqnr < 8192)
gic_write_dir(irqnr);
} else {
@@ -373,7 +373,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
}
if (irqnr < 16) {
gic_write_eoir(irqnr);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_write_dir(irqnr);
#ifdef CONFIG_SMP
/*
@@ -532,6 +532,8 @@ static void gic_cpu_sys_reg_init(void)
int i, cpu = smp_processor_id();
u64 mpidr = cpu_logical_map(cpu);
u64 need_rss = MPIDR_RS(mpidr);
bool group0;
u32 val, pribits;
/*
* Need to check that the SRE bit has actually been set. If
@@ -543,8 +545,28 @@ static void gic_cpu_sys_reg_init(void)
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
pribits = gic_read_ctlr();
pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
pribits++;
/*
* Let's find out if Group0 is under control of EL3 or not by
* setting the highest possible, non-zero priority in PMR.
*
* If SCR_EL3.FIQ is set, the priority gets shifted down in
* order for the CPU interface to set bit 7, and keep the
* actual priority in the non-secure range. In the process, it
* loses the least significant bit and the actual priority
* becomes 0x80. Reading it back returns 0, indicating that
* we don't have access to Group0.
*/
write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
val = read_gicreg(ICC_PMR_EL1);
group0 = val != 0;
/* Set priority mask register */
gic_write_pmr(DEFAULT_PMR_VALUE);
write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
/*
* Some firmwares hand over to the kernel with the BPR changed from
@@ -554,7 +576,7 @@ static void gic_cpu_sys_reg_init(void)
*/
gic_write_bpr1(0);
if (static_key_true(&supports_deactivate)) {
if (static_branch_likely(&supports_deactivate_key)) {
/* EOI drops priority only (mode 1) */
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
} else {
@@ -562,6 +584,37 @@ static void gic_cpu_sys_reg_init(void)
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
}
/* Always whack Group0 before Group1 */
if (group0) {
switch(pribits) {
case 8:
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
}
isb();
}
switch(pribits) {
case 8:
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
}
isb();
/* ... and let's hit the road... */
gic_write_grpen1(1);
@@ -590,9 +643,17 @@ static void gic_cpu_sys_reg_init(void)
pr_crit_once("RSS is required but GICD doesn't support it\n");
}
static bool gicv3_nolpi;
static int __init gicv3_nolpi_cfg(char *buf)
{
return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
static int gic_dist_supports_lpis(void)
{
return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
}
static void gic_cpu_init(void)
@@ -823,7 +884,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
{
struct irq_chip *chip = &gic_chip;
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
chip = &gic_eoimode1_chip;
/* SGIs are private to the core kernel */
@@ -861,6 +922,8 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
static int gic_irq_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -875,6 +938,7 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*hwirq = fwspec->param[1] + 32;
break;
case 1: /* PPI */
case GIC_IRQ_TYPE_PARTITION:
*hwirq = fwspec->param[1] + 16;
break;
case GIC_IRQ_TYPE_LPI: /* LPI */
@@ -885,6 +949,13 @@ static int gic_irq_domain_translate(struct irq_domain *d,
}
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
/*
* Make it clear that broken DTs are... broken.
* Partitioned PPIs are an unfortunate exception.
*/
WARN_ON(*type == IRQ_TYPE_NONE &&
fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
return 0;
}
@@ -894,6 +965,8 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
@@ -1002,9 +1075,9 @@ static int __init gic_init_bases(void __iomem *dist_base,
int err;
if (!is_hyp_mode_available())
static_key_slow_dec(&supports_deactivate);
static_branch_disable(&supports_deactivate_key);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
pr_info("GIC: Using split EOI/Deactivate mode\n");
gic_data.fwnode = handle;
@@ -1140,7 +1213,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
.fwnode = gic_data.fwnode,
.param_count = 3,
.param = {
[0] = 1,
[0] = GIC_IRQ_TYPE_PARTITION,
[1] = i,
[2] = IRQ_TYPE_NONE,
},
@@ -1239,7 +1312,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
gic_populate_ppi_partitions(node);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_of_setup_kvm_info(node);
return 0;
@@ -1541,7 +1614,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_acpi_setup_kvm_info();
return 0;

drivers/irqchip/irq-gic.c

@@ -121,7 +121,7 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock);
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
@@ -361,7 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
isb();
handle_domain_irq(gic->domain, irqnr, regs);
@@ -369,7 +369,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
}
if (irqnr < 16) {
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
/*
@@ -453,15 +453,26 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic)
return mask;
}
static bool gic_check_gicv2(void __iomem *base)
{
u32 val = readl_relaxed(base + GIC_CPU_IDENT);
return (val & 0xff0fff) == 0x02043B;
}
static void gic_cpu_if_up(struct gic_chip_data *gic)
{
void __iomem *cpu_base = gic_data_cpu_base(gic);
u32 bypass = 0;
u32 mode = 0;
int i;
if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
mode = GIC_CPU_CTRL_EOImodeNS;
if (gic_check_gicv2(cpu_base))
for (i = 0; i < 4; i++)
writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);
/*
* Preserve bypass disable bits to be written back later
*/
@@ -1000,6 +1011,9 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*hwirq += 16;
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
/* Make it clear that broken DTs are... broken */
WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
@@ -1009,6 +1023,8 @@ static int gic_irq_domain_translate(struct irq_domain *d,
*hwirq = fwspec->param[0];
*type = fwspec->param[1];
WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
@@ -1203,11 +1219,11 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
"irqchip/arm/gic:starting",
gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
name = kasprintf(GFP_KERNEL, "GICv2");
gic_init_chip(gic, NULL, name, true);
} else {
@@ -1234,7 +1250,7 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_key_slow_dec(&supports_deactivate);
static_branch_disable(&supports_deactivate_key);
gic = &gic_data[gic_nr];
gic->raw_dist_base = dist_base;
@@ -1264,12 +1280,6 @@ static int __init gicv2_force_probe_cfg(char *buf)
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
static bool gic_check_gicv2(void __iomem *base)
{
u32 val = readl_relaxed(base + GIC_CPU_IDENT);
return (val & 0xff0fff) == 0x02043B;
}
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
struct resource cpuif_res;
@@ -1420,7 +1430,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (ret)
return;
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_set_kvm_info(&gic_v2_kvm_info);
}
@@ -1447,7 +1457,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
* or the CPU interface is too small.
*/
if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_key_slow_dec(&supports_deactivate);
static_branch_disable(&supports_deactivate_key);
ret = __gic_init_bases(gic, -1, &node->fwnode);
if (ret) {
@@ -1628,7 +1638,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
* interface will always be the right size.
*/
if (!is_hyp_mode_available())
static_key_slow_dec(&supports_deactivate);
static_branch_disable(&supports_deactivate_key);
/*
* Initialize GIC instance zero (no multi-GIC support).
@@ -1653,7 +1663,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain);
if (static_key_true(&supports_deactivate))
if (static_branch_likely(&supports_deactivate_key))
gic_acpi_setup_kvm_info();
return 0;

drivers/irqchip/irq-mscc-ocelot.c (new file)

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi Ocelot IRQ controller driver
*
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#define ICPU_CFG_INTR_INTR_STICKY 0x10
#define ICPU_CFG_INTR_INTR_ENA 0x18
#define ICPU_CFG_INTR_INTR_ENA_CLR 0x1c
#define ICPU_CFG_INTR_INTR_ENA_SET 0x20
#define ICPU_CFG_INTR_DST_INTR_IDENT(x) (0x38 + 0x4 * (x))
#define ICPU_CFG_INTR_INTR_TRIGGER(x) (0x5c + 0x4 * (x))
#define OCELOT_NR_IRQ 24
static void ocelot_irq_unmask(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct irq_chip_type *ct = irq_data_get_chip_type(data);
unsigned int mask = data->mask;
u32 val;
irq_gc_lock(gc);
val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(0)) |
irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(1));
if (!(val & mask))
irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_STICKY);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ICPU_CFG_INTR_INTR_ENA_SET);
irq_gc_unlock(gc);
}
static void ocelot_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 reg = irq_reg_readl(gc, ICPU_CFG_INTR_DST_INTR_IDENT(0));
chained_irq_enter(chip, desc);
while (reg) {
u32 hwirq = __fls(reg);
generic_handle_irq(irq_find_mapping(d, hwirq));
reg &= ~(BIT(hwirq));
}
chained_irq_exit(chip, desc);
}
static int __init ocelot_irq_init(struct device_node *node,
struct device_node *parent)
{
struct irq_domain *domain;
struct irq_chip_generic *gc;
int parent_irq, ret;
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq)
return -EINVAL;
domain = irq_domain_add_linear(node, OCELOT_NR_IRQ,
&irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%s: unable to add irq domain\n", node->name);
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(domain, OCELOT_NR_IRQ, 1,
"icpu", handle_level_irq,
0, 0, 0);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", node->name);
goto err_domain_remove;
}
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_iomap(node, 0);
if (!gc->reg_base) {
pr_err("%s: unable to map resource\n", node->name);
ret = -ENOMEM;
goto err_gc_free;
}
gc->chip_types[0].regs.ack = ICPU_CFG_INTR_INTR_STICKY;
gc->chip_types[0].regs.mask = ICPU_CFG_INTR_INTR_ENA_CLR;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_unmask = ocelot_irq_unmask;
/* Mask and ack all interrupts */
irq_reg_writel(gc, 0, ICPU_CFG_INTR_INTR_ENA);
irq_reg_writel(gc, 0xffffffff, ICPU_CFG_INTR_INTR_STICKY);
irq_set_chained_handler_and_data(parent_irq, ocelot_irq_handler,
domain);
return 0;
err_gc_free:
irq_free_generic_chip(gc);
err_domain_remove:
irq_domain_remove(domain);
return ret;
}
IRQCHIP_DECLARE(ocelot_icpu, "mscc,ocelot-icpu-intr", ocelot_irq_init);

drivers/irqchip/irq-renesas-intc-irqpin.c

@@ -17,7 +17,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -78,16 +77,14 @@ struct intc_irqpin_priv {
struct platform_device *pdev;
struct irq_chip irq_chip;
struct irq_domain *irq_domain;
struct clk *clk;
atomic_t wakeup_path;
unsigned shared_irqs:1;
unsigned needs_clk:1;
u8 shared_irq_mask;
};
struct intc_irqpin_config {
unsigned int irlm_bit;
unsigned needs_irlm:1;
unsigned needs_clk:1;
};
static unsigned long intc_irqpin_read32(void __iomem *iomem)
@@ -287,14 +284,10 @@ static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
int hw_irq = irqd_to_hwirq(d);
irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
if (!p->clk)
return 0;
if (on)
clk_enable(p->clk);
atomic_inc(&p->wakeup_path);
else
clk_disable(p->clk);
atomic_dec(&p->wakeup_path);
return 0;
}
@@ -369,12 +362,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
.irlm_bit = 23, /* ICR0.IRLM0 */
.needs_irlm = 1,
.needs_clk = 0,
};
static const struct intc_irqpin_config intc_irqpin_rmobile = {
.needs_irlm = 0,
.needs_clk = 1,
};
static const struct of_device_id intc_irqpin_dt_ids[] = {
@@ -426,18 +417,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
config = of_device_get_match_data(dev);
if (config)
p->needs_clk = config->needs_clk;
p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
if (p->needs_clk) {
dev_err(dev, "unable to get clock\n");
ret = PTR_ERR(p->clk);
goto err0;
}
p->clk = NULL;
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -606,12 +585,25 @@ static int intc_irqpin_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused intc_irqpin_suspend(struct device *dev)
{
struct intc_irqpin_priv *p = dev_get_drvdata(dev);
if (atomic_read(&p->wakeup_path))
device_set_wakeup_path(dev);
return 0;
}
static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
static struct platform_driver intc_irqpin_device_driver = {
.probe = intc_irqpin_probe,
.remove = intc_irqpin_remove,
.driver = {
.name = "renesas_intc_irqpin",
.of_match_table = intc_irqpin_dt_ids,
.pm = &intc_irqpin_pm_ops,
}
};
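The Renesas hunks above and below replace the clk_enable()/clk_disable()
calls in the set_wake() path with a wakeup-path counter that is checked at
suspend time. A minimal sketch of that pattern, with hypothetical driver
names (device_set_wakeup_path(), SIMPLE_DEV_PM_OPS and the irq/atomic
helpers are the real kernel APIs):

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

struct my_intc_priv {
	atomic_t wakeup_path;		/* number of wakeup-enabled IRQs */
};

static int my_intc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct my_intc_priv *p = irq_data_get_irq_chip_data(d);

	/* Count wakeup users instead of toggling the module clock. */
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

static int __maybe_unused my_intc_suspend(struct device *dev)
{
	struct my_intc_priv *p = dev_get_drvdata(dev);

	/* Keep the device's power path up if any IRQ is a wakeup source. */
	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(my_intc_pm_ops, my_intc_suspend, NULL);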

drivers/irqchip/irq-renesas-irqc.c

@@ -17,7 +17,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -64,7 +63,7 @@ struct irqc_priv {
struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_domain *irq_domain;
struct clk *clk;
atomic_t wakeup_path;
};
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,14 +110,10 @@ static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
int hw_irq = irqd_to_hwirq(d);
irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
if (!p->clk)
return 0;
if (on)
clk_enable(p->clk);
atomic_inc(&p->wakeup_path);
else
clk_disable(p->clk);
atomic_dec(&p->wakeup_path);
return 0;
}
@@ -159,12 +154,6 @@ static int irqc_probe(struct platform_device *pdev)
p->pdev = pdev;
platform_set_drvdata(pdev, p);
p->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_warn(&pdev->dev, "unable to get clock\n");
p->clk = NULL;
}
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -276,6 +265,18 @@ static int irqc_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused irqc_suspend(struct device *dev)
{
struct irqc_priv *p = dev_get_drvdata(dev);
if (atomic_read(&p->wakeup_path))
device_set_wakeup_path(dev);
return 0;
}
static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);
static const struct of_device_id irqc_dt_ids[] = {
{ .compatible = "renesas,irqc", },
{},
@@ -288,6 +289,7 @@ static struct platform_driver irqc_device_driver = {
.driver = {
.name = "renesas_irqc",
.of_match_table = irqc_dt_ids,
.pm = &irqc_pm_ops,
}
};

drivers/irqchip/qcom-pdc.c (new file)

@@ -0,0 +1,311 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#define PDC_MAX_IRQS 126
#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
u32 cnt;
};
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static void pdc_reg_write(int reg, u32 i, u32 val)
{
writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
}
static u32 pdc_reg_read(int reg, u32 i)
{
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
u32 index, mask;
u32 enable;
index = pin_out / 32;
mask = pin_out % 32;
raw_spin_lock(&pdc_lock);
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
raw_spin_unlock(&pdc_lock);
}
static void qcom_pdc_gic_mask(struct irq_data *d)
{
pdc_enable_intr(d, false);
irq_chip_mask_parent(d);
}
static void qcom_pdc_gic_unmask(struct irq_data *d)
{
pdc_enable_intr(d, true);
irq_chip_unmask_parent(d);
}
/*
* GIC does not handle falling edge or active low. To allow falling edge and
* active low interrupts to be handled at GIC, PDC has an inverter that inverts
* falling edge into a rising edge and active low into an active high.
* For the inverter to work, the polarity bit in the IRQ_CONFIG register has to
* be set as per the table below.
* Level sensitive active low       LOW
* Rising edge sensitive            NOT USED
* Falling edge sensitive           LOW
* Dual Edge sensitive              NOT USED
* Level sensitive active High      HIGH
* Falling Edge sensitive           NOT USED
* Rising edge sensitive            HIGH
* Dual Edge sensitive              HIGH
*/
enum pdc_irq_config_bits {
PDC_LEVEL_LOW = 0b000,
PDC_EDGE_FALLING = 0b010,
PDC_LEVEL_HIGH = 0b100,
PDC_EDGE_RISING = 0b110,
PDC_EDGE_DUAL = 0b111,
};
/**
* qcom_pdc_gic_set_type: Configure PDC for the interrupt
*
* @d: the interrupt data
* @type: the interrupt type
*
* If @type is edge triggered, forward that as Rising edge as PDC
* takes care of converting falling edge to rising edge signal
* If @type is level, then forward that as level high as PDC
* takes care of converting active low to active high signal
*/
static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
{
int pin_out = d->hwirq;
enum pdc_irq_config_bits pdc_type;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
pdc_type = PDC_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
pdc_type = PDC_EDGE_FALLING;
type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_BOTH:
pdc_type = PDC_EDGE_DUAL;
break;
case IRQ_TYPE_LEVEL_HIGH:
pdc_type = PDC_LEVEL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
pdc_type = PDC_LEVEL_LOW;
type = IRQ_TYPE_LEVEL_HIGH;
break;
default:
WARN_ON(1);
return -EINVAL;
}
pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
return irq_chip_set_type_parent(d, type);
}
static struct irq_chip qcom_pdc_gic_chip = {
.name = "PDC",
.irq_eoi = irq_chip_eoi_parent,
.irq_mask = qcom_pdc_gic_mask,
.irq_unmask = qcom_pdc_gic_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE,
.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static irq_hw_number_t get_parent_hwirq(int pin)
{
int i;
struct pdc_pin_region *region;
for (i = 0; i < pdc_region_cnt; i++) {
region = &pdc_region[i];
if (pin >= region->pin_base &&
pin < region->pin_base + region->cnt)
return (region->parent_base + pin - region->pin_base);
}
WARN_ON(1);
return ~0UL;
}
static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
if (is_of_node(fwspec->fwnode)) {
if (fwspec->param_count != 2)
return -EINVAL;
*hwirq = fwspec->param[0];
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
return 0;
}
return -EINVAL;
}
static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *data)
{
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq, parent_hwirq;
unsigned int type;
int ret;
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
if (ret)
return -EINVAL;
parent_hwirq = get_parent_hwirq(hwirq);
if (parent_hwirq == ~0UL)
return -EINVAL;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&qcom_pdc_gic_chip, NULL);
if (ret)
return ret;
if (type & IRQ_TYPE_EDGE_BOTH)
type = IRQ_TYPE_EDGE_RISING;
if (type & IRQ_TYPE_LEVEL_MASK)
type = IRQ_TYPE_LEVEL_HIGH;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0;
parent_fwspec.param[1] = parent_hwirq;
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
&parent_fwspec);
}
static const struct irq_domain_ops qcom_pdc_ops = {
.translate = qcom_pdc_translate,
.alloc = qcom_pdc_alloc,
.free = irq_domain_free_irqs_common,
};
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n;
n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
if (n <= 0 || n % 3)
return -EINVAL;
pdc_region_cnt = n / 3;
pdc_region = kcalloc(pdc_region_cnt, sizeof(*pdc_region), GFP_KERNEL);
if (!pdc_region) {
pdc_region_cnt = 0;
return -ENOMEM;
}
for (n = 0; n < pdc_region_cnt; n++) {
ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
n * 3 + 0,
&pdc_region[n].pin_base);
if (ret)
return ret;
ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
n * 3 + 1,
&pdc_region[n].parent_base);
if (ret)
return ret;
ret = of_property_read_u32_index(np, "qcom,pdc-ranges",
n * 3 + 2,
&pdc_region[n].cnt);
if (ret)
return ret;
}
return 0;
}
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *parent_domain, *pdc_domain;
int ret;
pdc_base = of_iomap(node, 0);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
return -ENXIO;
}
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: unable to find PDC's parent domain\n", node);
ret = -ENXIO;
goto fail;
}
ret = pdc_setup_pin_mapping(node);
if (ret) {
pr_err("%pOF: failed to init PDC pin-hwirq mapping\n", node);
goto fail;
}
pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS,
of_fwnode_handle(node),
&qcom_pdc_ops, NULL);
if (!pdc_domain) {
pr_err("%pOF: GIC domain add failed\n", node);
ret = -ENOMEM;
goto fail;
}
return 0;
fail:
kfree(pdc_region);
iounmap(pdc_base);
return ret;
}
IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);