Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "This updated pull request does not contain the last few GIC related
  patches which were reported to cause a regression. There is a fix
  available, but I let it breed for a couple of days first.

  The irq department provides:

   - new infrastructure to support non-PCI based MSI interrupts
   - a couple of new irq chip drivers
   - the usual pile of fixlets and updates to irq chip drivers
   - preparatory changes for removal of the irq argument from interrupt
     flow handlers
   - preparatory changes to remove IRQF_VALID"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (129 commits)
  irqchip/imx-gpcv2: IMX GPCv2 driver for wakeup sources
  irqchip: Add bcm2836 interrupt controller for Raspberry Pi 2
  irqchip: Add documentation for the bcm2836 interrupt controller
  irqchip/bcm2835: Add support for being used as a second level controller
  irqchip/bcm2835: Refactor handle_IRQ() calls out of MAKE_HWIRQ
  PCI: xilinx: Fix typo in function name
  irqchip/gic: Ensure gic_cpu_if_up/down() programs correct GIC instance
  irqchip/gic: Only allow the primary GIC to set the CPU map
  PCI/MSI: pci-xgene-msi: Consolidate chained IRQ handler install/remove
  unicore32/irq: Prepare puv3_gpio_handler for irq argument removal
  tile/pci_gx: Prepare trio_handle_level_irq for irq argument removal
  m68k/irq: Prepare irq handlers for irq argument removal
  C6X/megamode-pic: Prepare megamod_irq_cascade for irq argument removal
  blackfin: Prepare irq handlers for irq argument removal
  arc/irq: Prepare idu_cascade_isr for irq argument removal
  sparc/irq: Use access helper irq_data_get_affinity_mask()
  sparc/irq: Use helper irq_data_get_irq_handler_data()
  parisc/irq: Use access helper irq_data_get_affinity_mask()
  mn10300/irq: Use access helper irq_data_get_affinity_mask()
  irqchip/i8259: Prepare i8259_irq_dispatch for irq argument removal
  ...
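All of the "Prepare ... for irq argument removal" commits in the shortlog follow one pattern, visible throughout the hunks below. A minimal sketch of the converted flow-handler shape (the handler name and its private data are hypothetical; irq_desc_get_irq() and irq_desc_get_handler_data() are the real helpers this merge uses):

#include <linux/irq.h>
#include <linux/irqdesc.h>

/* The "__irq" parameter is dead weight kept only until every handler is
 * converted; the body re-derives everything from the descriptor so the
 * argument can later be dropped from the prototype entirely. */
static void example_cascade_handler(unsigned int __irq, struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);	/* virq from desc */
	void *priv = irq_desc_get_handler_data(desc);	/* driver state */

	/* ... demultiplex the parent irq and generic_handle_irq() the
	 * children; neither step needs the __irq argument anymore ... */
}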
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
@@ -104,14 +104,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
 {
 	u32 offset, reg_offset, bit_pos;
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
 
 	writel(BIT(bit_pos),
@@ -142,15 +141,14 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 {
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 	u32 offset;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -164,15 +162,14 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
 {
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 	u32 offset;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
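All three Keystone callbacks above are converted the same way: derive the MSI descriptor from the irq_data and the host bridge sysdata via the new core helper, rather than round-tripping through the virq number. A condensed sketch of the new shape (the callback name is invented; sys_to_pcie(), struct pcie_port and the register write are the driver's own, as in the hunks):

static void example_msi_irq_ack(struct irq_data *d)
{
	/* Descriptor comes straight from the irq_data, replacing
	 * irq_get_msi_desc(d->irq). */
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	/* Host bridge sysdata via the new core helper, replacing the
	 * open-coded msi->dev->bus->sysdata chain. */
	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
	u32 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

	/* ... write the ack bit for "offset" to the MSI IRQ register ... */
}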
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
@@ -110,8 +110,9 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 	return -EINVAL;
 }
 
-static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irqs[0];
 	struct pcie_port *pp = &ks_pcie->pp;
@@ -137,8 +138,10 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
  * Traverse through pending legacy interrupts and invoke handler for each. Also
  * takes care of interrupt controller level mask/ack operation.
  */
-static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(unsigned int __irq,
+				       struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	struct pcie_port *pp = &ks_pcie->pp;
 	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
@@ -212,9 +215,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
 
 	/* Legacy IRQ */
 	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
-		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
-		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
-					ks_pcie_legacy_irq_handler);
+		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+						 ks_pcie_legacy_irq_handler,
+						 ks_pcie);
 	}
 	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
 
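The setup hunk above shows the second recurring conversion in this merge: the two-step irq_set_handler_data() plus irq_set_chained_handler() sequence becomes a single call, closing the window in which the chained handler could fire and observe stale or missing handler data. A sketch with hypothetical names, reusing the handler shape sketched after the commit message:

extern void example_cascade_handler(unsigned int irq, struct irq_desc *desc);

static void example_setup_cascade(unsigned int parent_irq, void *dev_priv)
{
	/* Installs the flow handler and its data in one atomic step;
	 * the old two-call sequence could race against an incoming
	 * interrupt between the calls. */
	irq_set_chained_handler_and_data(parent_irq,
					 example_cascade_handler,
					 dev_priv);
}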
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
@@ -40,8 +40,8 @@ struct xgene_msi_group {
 
 struct xgene_msi {
 	struct device_node	*node;
-	struct msi_controller	mchip;
-	struct irq_domain	*domain;
+	struct irq_domain	*inner_domain;
+	struct irq_domain	*msi_domain;
 	u64			msi_addr;
 	void __iomem		*msi_regs;
 	unsigned long		*bitmap;
@@ -251,17 +251,17 @@ static const struct irq_domain_ops msi_domain_ops = {
 
 static int xgene_allocate_domains(struct xgene_msi *msi)
 {
-	msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
-					    &msi_domain_ops, msi);
-	if (!msi->domain)
+	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain)
 		return -ENOMEM;
 
-	msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
-						      &xgene_msi_domain_info,
-						      msi->domain);
+	msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+						    &xgene_msi_domain_info,
+						    msi->inner_domain);
 
-	if (!msi->mchip.domain) {
-		irq_domain_remove(msi->domain);
+	if (!msi->msi_domain) {
+		irq_domain_remove(msi->inner_domain);
 		return -ENOMEM;
 	}
 
@@ -270,10 +270,10 @@ static int xgene_allocate_domains(struct xgene_msi *msi)
 
 static void xgene_free_domains(struct xgene_msi *msi)
 {
-	if (msi->mchip.domain)
-		irq_domain_remove(msi->mchip.domain);
-	if (msi->domain)
-		irq_domain_remove(msi->domain);
+	if (msi->msi_domain)
+		irq_domain_remove(msi->msi_domain);
+	if (msi->inner_domain)
+		irq_domain_remove(msi->inner_domain);
 }
 
 static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
@@ -339,7 +339,7 @@ static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
 		 * CPU0
 		 */
 		hw_irq = hwirq_to_canonical_hwirq(hw_irq);
-		virq = irq_find_mapping(xgene_msi->domain, hw_irq);
+		virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
 		WARN_ON(!virq);
 		if (virq != 0)
 			generic_handle_irq(virq);
@@ -367,10 +367,8 @@ static int xgene_msi_remove(struct platform_device *pdev)
 
 	for (i = 0; i < NR_HW_IRQS; i++) {
 		virq = msi->msi_groups[i].gic_irq;
-		if (virq != 0) {
-			irq_set_chained_handler(virq, NULL);
-			irq_set_handler_data(virq, NULL);
-		}
+		if (virq != 0)
+			irq_set_chained_handler_and_data(virq, NULL, NULL);
 	}
 	kfree(msi->msi_groups);
 
@@ -420,8 +418,8 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
 		}
 
 		if (err) {
-			irq_set_chained_handler(msi_group->gic_irq, NULL);
-			irq_set_handler_data(msi_group->gic_irq, NULL);
+			irq_set_chained_handler_and_data(msi_group->gic_irq,
+							 NULL, NULL);
 			return err;
 		}
 	}
@@ -440,8 +438,8 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
 		if (!msi_group->gic_irq)
 			continue;
 
-		irq_set_chained_handler(msi_group->gic_irq, NULL);
-		irq_set_handler_data(msi_group->gic_irq, NULL);
+		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+						 NULL);
 	}
 }
 
@@ -496,7 +494,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
 		goto error;
 	}
 	xgene_msi->msi_addr = res->start;
-
+	xgene_msi->node = pdev->dev.of_node;
 	xgene_msi->num_cpus = num_possible_cpus();
 
 	rc = xgene_msi_init_allocator(xgene_msi);
@@ -560,19 +558,10 @@ static int xgene_msi_probe(struct platform_device *pdev)
 
 	cpu_notifier_register_done();
 
-	xgene_msi->mchip.of_node = pdev->dev.of_node;
-	rc = of_pci_msi_chip_add(&xgene_msi->mchip);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to add MSI controller chip\n");
-		goto error_notifier;
-	}
-
 	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
 
 	return 0;
 
-error_notifier:
-	unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
 error:
 	xgene_msi_remove(pdev);
 	return rc;
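Beyond the chained-handler consolidation, the X-Gene diff drops struct msi_controller entirely: the driver now exposes an inner irq domain for vector bookkeeping plus a PCI-MSI domain stacked on top of it. A reduced sketch of that shape (struct example_msi and the ops/info structures are placeholders for the driver's own):

static const struct irq_domain_ops example_domain_ops = {
	/* .alloc / .free for vector bookkeeping elided */
};

static struct msi_domain_info example_msi_domain_info = {
	/* flags and struct irq_chip elided */
};

struct example_msi {
	struct device_node *node;
	struct irq_domain  *inner_domain;	/* hwirq allocation/mapping */
	struct irq_domain  *msi_domain;		/* what PCI devices resolve */
};

static int example_allocate_domains(struct example_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, 256,
						  &example_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	/* Stacks the PCI-MSI plumbing on the inner domain and, per the
	 * msi.c hunk further below, tags it DOMAIN_BUS_PCI_MSI so the
	 * OF code can find it by bus token. */
	msi->msi_domain = pci_msi_create_irq_domain(msi->node,
						    &example_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}
	return 0;
}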
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
@@ -255,7 +255,7 @@ static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
 	int irq, pos0, i;
-	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));
 
 	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
 				       order_base_2(no_irqs));
@@ -326,8 +326,8 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
 static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct msi_desc *msi = irq_data_get_msi(data);
-	struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+	struct msi_desc *msi = irq_data_get_msi_desc(data);
+	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 
 	clear_irq_range(pp, irq, 1, data->hwirq);
 }
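Note the teardown path above starts from a bare virq number rather than an irq_data, so it first fetches the irq_data and then applies the same accessors. Sketched generically (function name hypothetical):

static void example_teardown_irq(unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);	/* virq -> irq_data */
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	void *sysdata = msi_desc_to_pci_sysdata(msi);	/* host bridge data */

	/* ... release "irq" in the driver's allocation bitmap, as
	 * clear_irq_range() does above ... */
}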
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
@@ -227,18 +227,16 @@ static struct pci_ops xilinx_pcie_ops = {
  */
 static void xilinx_pcie_destroy_msi(unsigned int irq)
 {
-	struct irq_desc *desc;
 	struct msi_desc *msi;
 	struct xilinx_pcie_port *port;
 
-	desc = irq_to_desc(irq);
-	msi = irq_desc_get_msi_desc(desc);
-	port = sys_to_pcie(msi->dev->bus->sysdata);
-
-	if (!test_bit(irq, msi_irq_in_use))
+	if (!test_bit(irq, msi_irq_in_use)) {
+		msi = irq_get_msi_desc(irq);
+		port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
-	else
+	} else {
 		clear_bit(irq, msi_irq_in_use);
+	}
 }
 
 /**
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
@@ -39,14 +39,13 @@ struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
 
 static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
 {
-	struct irq_domain *domain = NULL;
+	struct irq_domain *domain;
 
-	if (dev->bus->msi)
-		domain = dev->bus->msi->domain;
-	if (!domain)
-		domain = arch_get_pci_msi_domain(dev);
+	domain = dev_get_msi_domain(&dev->dev);
+	if (domain)
+		return domain;
 
-	return domain;
+	return arch_get_pci_msi_domain(dev);
 }
 
 static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -116,7 +115,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		ret = arch_setup_msi_irq(dev, entry);
 		if (ret < 0)
 			return ret;
@@ -136,7 +135,7 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
 	int i;
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				arch_teardown_msi_irq(entry->irq + i);
@@ -153,7 +152,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq)
 
 	entry = NULL;
 	if (dev->msix_enabled) {
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (irq == entry->irq)
 				break;
 		}
@@ -193,7 +192,8 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
-	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+			       mask_bits);
 
 	return mask_bits;
 }
@@ -234,7 +234,7 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 {
-	struct msi_desc *desc = irq_data_get_msi(data);
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
 
 	if (desc->msi_attrib.is_msix) {
 		msix_mask_irq(desc, flag);
@@ -267,13 +267,15 @@ void default_restore_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		default_restore_msi_irq(dev, entry->irq);
 }
 
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	BUG_ON(entry->dev->current_state != PCI_D0);
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	BUG_ON(dev->current_state != PCI_D0);
 
 	if (entry->msi_attrib.is_msix) {
 		void __iomem *base = entry->mask_base +
@@ -283,7 +285,6 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
 		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
-		struct pci_dev *dev = entry->dev;
 		int pos = dev->msi_cap;
 		u16 data;
 
@@ -303,7 +304,9 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	if (entry->dev->current_state != PCI_D0) {
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	if (dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
 		void __iomem *base;
@@ -314,7 +317,6 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
 		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
-		struct pci_dev *dev = entry->dev;
 		int pos = dev->msi_cap;
 		u16 msgctl;
 
@@ -348,21 +350,22 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
+	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
 	struct msi_desc *entry, *tmp;
 	struct attribute **msi_attrs;
 	struct device_attribute *dev_attr;
 	int i, count = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				BUG_ON(irq_has_action(entry->irq + i));
 
 	pci_msi_teardown_msi_irqs(dev);
 
-	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+	list_for_each_entry_safe(entry, tmp, msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			if (list_is_last(&entry->list, &dev->msi_list))
+			if (list_is_last(&entry->list, msi_list))
 				iounmap(entry->mask_base);
 		}
 
@@ -387,18 +390,6 @@ static void free_msi_irqs(struct pci_dev *dev)
 	}
 }
 
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
-	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return NULL;
-
-	INIT_LIST_HEAD(&desc->list);
-	desc->dev = dev;
-
-	return desc;
-}
-
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
 {
 	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
@@ -433,7 +424,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
 	if (!dev->msix_enabled)
 		return;
-	BUG_ON(list_empty(&dev->msi_list));
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
@@ -441,7 +432,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 			PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		msix_mask_irq(entry, entry->masked);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
@@ -486,7 +477,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	int count = 0;
 
 	/* Determine how many msi entries we have */
-	list_for_each_entry(entry, &pdev->msi_list, list)
+	for_each_pci_msi_entry(entry, pdev)
 		++num_msi;
 	if (!num_msi)
 		return 0;
@@ -495,7 +486,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 	msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
 	if (!msi_attrs)
 		return -ENOMEM;
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
 		if (!msi_dev_attr)
 			goto error_attrs;
@@ -553,7 +544,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
 	struct msi_desc *entry;
 
 	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(dev);
+	entry = alloc_msi_entry(&dev->dev);
 	if (!entry)
 		return NULL;
 
@@ -584,7 +575,7 @@ static int msi_verify_entries(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (!dev->no_64bit_msi || !entry->msg.address_hi)
 			continue;
 		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -621,7 +612,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
-	list_add_tail(&entry->list, &dev->msi_list);
+	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -682,7 +673,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 	int i;
 
 	for (i = 0; i < nvec; i++) {
-		entry = alloc_msi_entry(dev);
+		entry = alloc_msi_entry(&dev->dev);
 		if (!entry) {
 			if (!i)
 				iounmap(base);
@@ -699,7 +690,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 		entry->mask_base = base;
 		entry->nvec_used = 1;
 
-		list_add_tail(&entry->list, &dev->msi_list);
+		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 	}
 
 	return 0;
@@ -711,7 +702,7 @@ static void msix_program_entries(struct pci_dev *dev,
 	struct msi_desc *entry;
 	int i = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
 
@@ -792,7 +783,7 @@ out_avail:
 		struct msi_desc *entry;
 		int avail = 0;
 
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (entry->irq != 0)
 				avail++;
 		}
@@ -881,8 +872,8 @@ void pci_msi_shutdown(struct pci_dev *dev)
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
-	BUG_ON(list_empty(&dev->msi_list));
-	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+	desc = first_pci_msi_entry(dev);
 
 	pci_msi_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
@@ -988,7 +979,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
 		return;
 
 	/* Return the device with MSI-X masked as initial states */
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		/* Keep cached states to be restored */
 		__pci_msix_desc_mask_irq(entry, 1);
 	}
@@ -1028,7 +1019,6 @@ EXPORT_SYMBOL(pci_msi_enabled);
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
-	INIT_LIST_HEAD(&dev->msi_list);
 }
 
 /**
@@ -1125,6 +1115,19 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+	return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+	struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+	return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 /**
  * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
@@ -1133,7 +1136,7 @@ EXPORT_SYMBOL(pci_enable_msix_range);
  */
 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
 {
-	struct msi_desc *desc = irq_data->msi_desc;
+	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
 
 	/*
 	 * For MSI-X desc->irq is always equal to irq_data->irq. For
@@ -1257,12 +1260,19 @@ struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
 					     struct msi_domain_info *info,
 					     struct irq_domain *parent)
 {
+	struct irq_domain *domain;
+
 	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
 		pci_msi_domain_update_dom_ops(info);
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		pci_msi_domain_update_chip_ops(info);
 
-	return msi_create_irq_domain(node, info, parent);
+	domain = msi_create_irq_domain(node, info, parent);
+	if (!domain)
+		return NULL;
+
+	domain->bus_token = DOMAIN_BUS_PCI_MSI;
+	return domain;
 }
 
 /**
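The common thread of the msi.c hunks: the msi_list moves from struct pci_dev to struct device (reached via dev_to_msi_list(&dev->dev)), which is what lets the new non-PCI MSI infrastructure share struct msi_desc. PCI code keeps thin wrappers; a usage sketch, assuming pdev is a valid struct pci_dev * with MSI enabled:

#include <linux/msi.h>
#include <linux/pci.h>

static void example_dump_msi_entries(struct pci_dev *pdev)
{
	struct msi_desc *entry;

	/* Iterates dev_to_msi_list(&pdev->dev) under the hood. */
	for_each_pci_msi_entry(entry, pdev) {
		if (entry->irq)
			dev_dbg(&pdev->dev, "MSI entry -> virq %d\n",
				entry->irq);
	}

	/* First descriptor, e.g. for the (single) MSI capability: */
	entry = first_pci_msi_entry(pdev);
}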
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/of.h>
@@ -59,3 +60,32 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
 		return of_node_get(bus->bridge->parent->of_node);
 	return NULL;
 }
+
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_IRQ_DOMAIN
+	struct device_node *np;
+	struct irq_domain *d;
+
+	if (!bus->dev.of_node)
+		return NULL;
+
+	/* Start looking for a phandle to an MSI controller. */
+	np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+
+	/*
+	 * If we don't have an msi-parent property, look for a domain
+	 * directly attached to the host bridge.
+	 */
+	if (!np)
+		np = bus->dev.of_node;
+
+	d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+	if (d)
+		return d;
+
+	return irq_find_host(np);
+#else
+	return NULL;
+#endif
+}
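pci_host_bridge_of_msi_domain() tries the standard msi-parent phandle first and falls back to the bridge's own node; the token-aware lookup is what distinguishes the PCI-MSI domain from other irq domains the same controller node may host. A standalone sketch of that lookup order (function name hypothetical):

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *example_find_pci_msi_domain(struct device_node *np)
{
	struct irq_domain *d;

	/* Prefer a domain registered with the PCI-MSI bus token, which
	 * pci_msi_create_irq_domain() now sets (see the msi.c hunk). */
	d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/* Legacy fallback: any irq domain attached to the node. */
	return irq_find_host(np);
}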
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
@@ -661,6 +661,35 @@ static void pci_set_bus_speed(struct pci_bus *bus)
 	}
 }
 
+static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
+{
+	struct irq_domain *d;
+
+	/*
+	 * Any firmware interface that can resolve the msi_domain
+	 * should be called from here.
+	 */
+	d = pci_host_bridge_of_msi_domain(bus);
+
+	return d;
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+	struct irq_domain *d;
+
+	/*
+	 * Either bus is the root, and we must obtain it from the
+	 * firmware, or we inherit it from the bridge device.
+	 */
+	if (pci_is_root_bus(bus))
+		d = pci_host_bridge_msi_domain(bus);
+	else
+		d = dev_get_msi_domain(&bus->self->dev);
+
+	dev_set_msi_domain(&bus->dev, d);
+}
+
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
 {
@@ -714,6 +743,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 	bridge->subordinate = child;
 
 add_dev:
+	pci_set_bus_msi_domain(child);
 	ret = device_register(&child->dev);
 	WARN_ON(ret < 0);
 
@@ -1594,6 +1624,17 @@ static void pci_init_capabilities(struct pci_dev *dev)
 	pci_enable_acs(dev);
 }
 
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+	/*
+	 * If no domain has been set through the pcibios_add_device
+	 * callback, inherit the default from the bus device.
+	 */
+	if (!dev_get_msi_domain(&dev->dev))
+		dev_set_msi_domain(&dev->dev,
+				   dev_get_msi_domain(&dev->bus->dev));
+}
+
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 {
 	int ret;
@@ -1635,6 +1676,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 	ret = pcibios_add_device(dev);
 	WARN_ON(ret < 0);
 
+	/* Setup MSI irq domain */
+	pci_set_msi_domain(dev);
+
 	/* Notifier could use PCI capabilities */
 	dev->match_driver = false;
 	ret = device_add(&dev->dev);
@@ -2008,6 +2052,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
 	b->bridge = get_device(&bridge->dev);
 	device_enable_async_suspend(b->bridge);
 	pci_set_bus_of_node(b);
+	pci_set_bus_msi_domain(b);
 
 	if (!parent)
 		set_dev_node(b->bridge, pcibus_to_node(b));
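Taken together, the probe.c hooks establish a top-down inheritance chain for the per-device MSI domain. A sketch of the flow (all functions named in the comment are in the hunks above; the wrapper function itself is hypothetical):

static struct irq_domain *example_msi_domain_for(struct pci_dev *pdev)
{
	/*
	 * pci_create_root_bus()  -> pci_set_bus_msi_domain(root)
	 *                           -> pci_host_bridge_msi_domain()
	 *                              -> pci_host_bridge_of_msi_domain()
	 * pci_alloc_child_bus()  -> pci_set_bus_msi_domain(child)
	 *                           -> dev_get_msi_domain(&bridge->dev)
	 * pci_device_add()       -> pci_set_msi_domain(dev)
	 *                           -> inherits from the bus unless
	 *                              pcibios_add_device() set one
	 *
	 * so by the time a driver enables MSI, the domain is one lookup
	 * away; this is what pci_msi_get_domain() in msi.c now does:
	 */
	return dev_get_msi_domain(&pdev->dev);
}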
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
@@ -265,7 +265,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
 	}
 
 	i = 0;
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
 		/* Vector is useless at this point. */
 		op.msix_entries[i].vector = -1;