// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <[email protected]>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
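
/*
 * The MSI IRQs form a two-level hierarchy: the "PCI-MSI" chip below sits on
 * top of the controller's own "DWPCI-MSI" chip. Mask and unmask are mirrored
 * to both levels (the per-device MSI capability and the controller's mask
 * register), while ack is forwarded to the parent chip only, since only the
 * controller latches a per-vector status bit.
 */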

static void dw_msi_ack_irq(struct irq_data *d)
{
        irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
        .name = "PCI-MSI",
        .irq_ack = dw_msi_ack_irq,
        .irq_mask = dw_msi_mask_irq,
        .irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
        int i, pos;
        unsigned long val;
        u32 status, num_ctrls;
        irqreturn_t ret = IRQ_NONE;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
        for (i = 0; i < num_ctrls; i++) {
                status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                                           (i * MSI_REG_CTRL_BLOCK_SIZE));
                if (!status)
                        continue;

                ret = IRQ_HANDLED;
                val = status;
                pos = 0;
                while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
                                            pos)) != MAX_MSI_IRQS_PER_CTRL) {
                        generic_handle_domain_irq(pp->irq_domain,
                                                  (i * MAX_MSI_IRQS_PER_CTRL) +
                                                  pos);
                        pos++;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
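
/*
 * Hypothetical usage sketch (not part of this file): a glue driver that
 * requests its own top-level MSI interrupt, instead of relying on the
 * chained handler below, can dispatch to the core from its handler. The
 * "foo_" names are illustrative; dw_handle_msi_irq() is exported above
 * precisely for this pattern:
 *
 *	static irqreturn_t foo_pcie_msi_irq_handler(int irq, void *arg)
 *	{
 *		struct dw_pcie_rp *pp = arg;
 *
 *		return dw_handle_msi_irq(pp);
 *	}
 */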

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct dw_pcie_rp *pp;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        dw_handle_msi_irq(pp);

        chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target;

        msi_target = (u64)pp->msi_data;

        msg->address_lo = lower_32_bits(msi_target);
        msg->address_hi = upper_32_bits(msi_target);

        msg->data = d->hwirq;

        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)d->hwirq, msg->address_hi, msg->address_lo);
}
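
/*
 * All vectors funnel through the controller's parent IRQ line(s), so a
 * per-vector CPU affinity cannot be honoured here; returning -EINVAL tells
 * the IRQ core that affinity changes are not supported for these vectors.
 */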
static int dw_pci_msi_set_affinity(struct irq_data *d,
                                   const struct cpumask *mask, bool force)
{
        return -EINVAL;
}
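
/*
 * Each MSI controller instance serves MAX_MSI_IRQS_PER_CTRL (32) vectors and
 * has its own ENABLE/MASK/STATUS register triplet spaced
 * MSI_REG_CTRL_BLOCK_SIZE bytes apart, hence the hwirq is decomposed into a
 * controller index and a bit position below. MASK bits are active high (a
 * set bit masks the vector) and STATUS is write-one-to-clear.
 */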

static void dw_pci_bottom_mask(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] |= BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] &= ~BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
        .name = "DWPCI-MSI",
        .irq_ack = dw_pci_bottom_ack,
        .irq_compose_msi_msg = dw_pci_setup_msi_msg,
        .irq_set_affinity = dw_pci_msi_set_affinity,
        .irq_mask = dw_pci_bottom_mask,
        .irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs,
                                    void *args)
{
        struct dw_pcie_rp *pp = domain->host_data;
        unsigned long flags;
        u32 i;
        int bit;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                                      order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);

        if (bit < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, bit + i,
                                    pp->msi_irq_chip,
                                    pp, handle_edge_irq,
                                    NULL, NULL);

        return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct dw_pcie_rp *pp = domain->host_data;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
                              order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
        .alloc  = dw_pcie_irq_domain_alloc,
        .free   = dw_pcie_irq_domain_free,
};
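
/*
 * Two stacked IRQ domains are created here: a linear domain that hands out
 * the controller's hwirqs (backed by the bitmap allocator above), and a PCI
 * MSI domain on top of it from which PCI devices allocate their vectors.
 */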

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

        pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                                                  &dw_pcie_msi_domain_ops, pp);
        if (!pp->irq_domain) {
                dev_err(pci->dev, "Failed to create IRQ domain\n");
                return -ENOMEM;
        }

        irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

        pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                   &dw_pcie_msi_domain_info,
                                                   pp->irq_domain);
        if (!pp->msi_domain) {
                dev_err(pci->dev, "Failed to create MSI domain\n");
                irq_domain_remove(pp->irq_domain);
                return -ENOMEM;
        }

        return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
        u32 ctrl;

        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
                if (pp->msi_irq[ctrl] > 0)
                        irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                                                         NULL, NULL);
        }

        irq_domain_remove(pp->msi_domain);
        irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target = (u64)pp->msi_data;

        if (!pci_msi_enabled() || !pp->has_msi_ctrl)
                return;

        /* Program the msi_data */
        dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
        dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
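
/*
 * Note that the controller decodes inbound writes to the address programmed
 * above and raises the MSI interrupt itself; the coherent buffer backing
 * pp->msi_data (allocated in dw_pcie_msi_host_init()) mainly serves to
 * reserve a bus address that no other DMA user can claim.
 */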

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct platform_device *pdev = to_platform_device(dev);
        u32 ctrl, max_vectors;
        int irq;

        /* Parse any "msiX" IRQs described in the devicetree */
        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
                char msi_name[] = "msiX";

                msi_name[3] = '0' + ctrl;
                irq = platform_get_irq_byname_optional(pdev, msi_name);
                if (irq == -ENXIO)
                        break;
                if (irq < 0)
                        return dev_err_probe(dev, irq,
                                             "Failed to parse MSI IRQ '%s'\n",
                                             msi_name);

                pp->msi_irq[ctrl] = irq;
        }

        /* If there are no "msiX" IRQs, the caller should fall back to the single "msi" IRQ */
        if (ctrl == 0)
                return -ENXIO;

        max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
        if (pp->num_vectors > max_vectors) {
                dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
                         max_vectors);
                pp->num_vectors = max_vectors;
        }
        if (!pp->num_vectors)
                pp->num_vectors = max_vectors;

        return 0;
}
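
/*
 * Illustrative devicetree fragment (hypothetical interrupt numbers) for the
 * split-MSI scheme parsed above, with one IRQ line per MSI controller:
 *
 *	interrupt-names = "msi0", "msi1";
 *	interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
 *		     <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
 *
 * Each "msiN" line serves MAX_MSI_IRQS_PER_CTRL (32) vectors, so the two
 * lines above would cap num_vectors at 64.
 */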

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct platform_device *pdev = to_platform_device(dev);
        u64 *msi_vaddr;
        int ret;
        u32 ctrl, num_ctrls;

        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
                pp->irq_mask[ctrl] = ~0;

        if (!pp->msi_irq[0]) {
                ret = dw_pcie_parse_split_msi_irq(pp);
                if (ret < 0 && ret != -ENXIO)
                        return ret;
        }

        if (!pp->num_vectors)
                pp->num_vectors = MSI_DEF_NUM_VECTORS;
        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        if (!pp->msi_irq[0]) {
                pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
                if (pp->msi_irq[0] < 0) {
                        pp->msi_irq[0] = platform_get_irq(pdev, 0);
                        if (pp->msi_irq[0] < 0)
                                return pp->msi_irq[0];
                }
        }

        dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

        pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

        ret = dw_pcie_allocate_domains(pp);
        if (ret)
                return ret;

        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
                if (pp->msi_irq[ctrl] > 0)
                        irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                                                         dw_chained_msi_isr, pp);
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

        msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
                                        GFP_KERNEL);
        if (!msi_vaddr) {
                dev_err(dev, "Failed to alloc and map MSI data\n");
                dw_pcie_free_msi(pp);
                return -ENOMEM;
        }

        return 0;
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource_entry *win;
        struct pci_host_bridge *bridge;
        struct resource *res;
        int ret;

        raw_spin_lock_init(&pp->lock);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
        if (res) {
                pp->cfg0_size = resource_size(res);
                pp->cfg0_base = res->start;

                pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
                if (IS_ERR(pp->va_cfg0_base))
                        return PTR_ERR(pp->va_cfg0_base);
        } else {
                dev_err(dev, "Missing *config* reg space\n");
                return -ENODEV;
        }

        if (!pci->dbi_base) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
                pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
                if (IS_ERR(pci->dbi_base))
                        return PTR_ERR(pci->dbi_base);
        }

        bridge = devm_pci_alloc_host_bridge(dev, 0);
        if (!bridge)
                return -ENOMEM;

        pp->bridge = bridge;

        /* Get the I/O range from DT */
        win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
        if (win) {
                pp->io_size = resource_size(win->res);
                pp->io_bus_addr = win->res->start - win->offset;
                pp->io_base = pci_pio_to_address(win->res->start);
        }

        if (pci->link_gen < 1)
                pci->link_gen = of_pci_get_max_link_speed(np);

        /* Set default bus ops */
        bridge->ops = &dw_pcie_ops;
        bridge->child_ops = &dw_child_pcie_ops;

        if (pp->ops->host_init) {
                ret = pp->ops->host_init(pp);
                if (ret)
                        return ret;
        }

        if (pci_msi_enabled()) {
                pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
                                     of_property_read_bool(np, "msi-parent") ||
                                     of_property_read_bool(np, "msi-map"));

                /*
                 * For the has_msi_ctrl case the default assignment is handled
                 * in the dw_pcie_msi_host_init().
                 */
                if (!pp->has_msi_ctrl && !pp->num_vectors) {
                        pp->num_vectors = MSI_DEF_NUM_VECTORS;
                } else if (pp->num_vectors > MAX_MSI_IRQS) {
                        dev_err(dev, "Invalid number of vectors\n");
                        ret = -EINVAL;
                        goto err_deinit_host;
                }

                if (pp->ops->msi_host_init) {
                        ret = pp->ops->msi_host_init(pp);
                        if (ret < 0)
                                goto err_deinit_host;
                } else if (pp->has_msi_ctrl) {
                        ret = dw_pcie_msi_host_init(pp);
                        if (ret < 0)
                                goto err_deinit_host;
                }
        }

        dw_pcie_version_detect(pci);

        dw_pcie_iatu_detect(pci);

        ret = dw_pcie_setup_rc(pp);
        if (ret)
                goto err_free_msi;

        if (!dw_pcie_link_up(pci)) {
                ret = dw_pcie_start_link(pci);
                if (ret)
                        goto err_free_msi;

                if (pci->ops && pci->ops->start_link) {
                        /* Ignore errors, the link may come up later */
                        dw_pcie_wait_for_link(pci);
                }
        }

        bridge->sysdata = pp;

        ret = pci_host_probe(bridge);
        if (ret)
                goto err_stop_link;

        return 0;

err_stop_link:
        dw_pcie_stop_link(pci);

err_free_msi:
        if (pp->has_msi_ctrl)
                dw_pcie_free_msi(pp);

err_deinit_host:
        if (pp->ops->host_deinit)
                pp->ops->host_deinit(pp);

        return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
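
/*
 * Minimal (hypothetical) glue-driver probe sketch. The "foo_" names are
 * illustrative and foo_pcie_host_ops is assumed to be a populated
 * struct dw_pcie_host_ops; everything else is the API exported above:
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct dw_pcie *pci;
 *
 *		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *		if (!pci)
 *			return -ENOMEM;
 *
 *		pci->dev = &pdev->dev;
 *		pci->pp.ops = &foo_pcie_host_ops;
 *
 *		return dw_pcie_host_init(&pci->pp);
 *	}
 */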

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        pci_stop_root_bus(pp->bridge->bus);
        pci_remove_root_bus(pp->bridge->bus);

        dw_pcie_stop_link(pci);

        if (pp->has_msi_ctrl)
                dw_pcie_free_msi(pp);

        if (pp->ops->host_deinit)
                pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
                                                unsigned int devfn, int where)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int type, ret;
        u32 busdev;

        /*
         * Checking whether the link is up here is a last line of defense
         * against platforms that forward errors on the system bus as
         * SError upon PCI configuration transactions issued when the link
         * is down. This check is racy by definition and does not stop
         * the system from triggering an SError if the link goes down
         * after this check is performed.
         */
        if (!dw_pcie_link_up(pci))
                return NULL;

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (pci_is_root_bus(bus->parent))
                type = PCIE_ATU_TYPE_CFG0;
        else
                type = PCIE_ATU_TYPE_CFG1;

        ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
                                        pp->cfg0_size);
        if (ret)
                return NULL;

        return pp->va_cfg0_base + where;
}
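
/*
 * The first outbound iATU window is retargeted at the config space of the
 * addressed device for every child config access (CFG0 for devices on the
 * root port's secondary bus, CFG1 for anything further downstream). On
 * platforms short on iATU windows, that same window doubles as the I/O
 * window (pp->cfg0_io_shared), so the accessors below restore the I/O
 * mapping once the config transaction has completed.
 */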

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 *val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret;

        ret = pci_generic_config_read(bus, devfn, where, size, val);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        if (pp->cfg0_io_shared) {
                ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                                                pp->io_base, pp->io_bus_addr,
                                                pp->io_size);
                if (ret)
                        return PCIBIOS_SET_FAILED;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret;

        ret = pci_generic_config_write(bus, devfn, where, size, val);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        if (pp->cfg0_io_shared) {
                ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                                                pp->io_base, pp->io_bus_addr,
                                                pp->io_size);
                if (ret)
                        return PCIBIOS_SET_FAILED;
        }

        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
        .map_bus = dw_pcie_other_conf_map_bus,
        .read = dw_pcie_rd_other_conf,
        .write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (PCI_SLOT(devfn) > 0)
                return NULL;

        return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
        .map_bus = dw_pcie_own_conf_map_bus,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
};
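
/*
 * Root-bus accesses above go straight to the DBI space (the root port is
 * the only device on the root bus, hence the PCI_SLOT() check); only
 * child-bus accesses go through the iATU. The window assignment policy
 * below reserves window 0 for config cycles and parcels out the remaining
 * windows to the bridge's MEM ranges first, then the I/O range, falling
 * back to sharing window 0 between config and I/O when the hardware runs
 * out of windows.
 */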

static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct resource_entry *entry;
        int i, ret;

        /* Note the very first outbound ATU is used for CFG IOs */
        if (!pci->num_ob_windows) {
                dev_err(pci->dev, "No outbound iATU found\n");
                return -EINVAL;
        }

        /*
         * Ensure all outbound windows are disabled before proceeding with
         * the MEM/IO ranges setups.
         */
        for (i = 0; i < pci->num_ob_windows; i++)
                dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

        i = 0;
        resource_list_for_each_entry(entry, &pp->bridge->windows) {
                if (resource_type(entry->res) != IORESOURCE_MEM)
                        continue;

                if (pci->num_ob_windows <= ++i)
                        break;

                ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
                                                entry->res->start,
                                                entry->res->start - entry->offset,
                                                resource_size(entry->res));
                if (ret) {
                        dev_err(pci->dev, "Failed to set MEM range %pr\n",
                                entry->res);
                        return ret;
                }
        }

        if (pp->io_size) {
                if (pci->num_ob_windows > ++i) {
                        ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
                                                        pp->io_base,
                                                        pp->io_bus_addr,
                                                        pp->io_size);
                        if (ret) {
                                /*
                                 * "entry" is no longer valid once the loop
                                 * above terminates, so report the I/O range
                                 * directly instead of dereferencing it.
                                 */
                                dev_err(pci->dev,
                                        "Failed to set IO range %pa (size %pa)\n",
                                        &pp->io_base, &pp->io_size);
                                return ret;
                        }
                } else {
                        pp->cfg0_io_shared = true;
                }
        }

        if (pci->num_ob_windows <= i)
                dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
                         pci->num_ob_windows);

        return 0;
}

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u32 val, ctrl, num_ctrls;
        int ret;

        /*
         * Enable DBI read-only registers for writing/updating configuration.
         * Write permission gets disabled towards the end of this function.
         */
        dw_pcie_dbi_ro_wr_en(pci);

        dw_pcie_setup(pci);

        if (pp->has_msi_ctrl) {
                num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

                /* Initialize IRQ Status array */
                for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
                        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                                           (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                           pp->irq_mask[ctrl]);
                        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                                           (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                           ~0);
                }
        }

        dw_pcie_msi_init(pp);

        /* Setup RC BARs */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

        /* Setup interrupt pins */
        val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
        val &= 0xffff00ff;
        val |= 0x00000100;
        dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

        /* Setup bus numbers */
        val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
        val &= 0xff000000;
        val |= 0x00ff0100;
        dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

        /* Setup command register */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
               PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        /*
         * If the platform provides its own child bus config accesses, it means
         * the platform uses its own address translation component rather than
         * ATU, so we should not program the ATU here.
         */
        if (pp->bridge->child_ops == &dw_child_pcie_ops) {
                ret = dw_pcie_iatu_setup(pp);
                if (ret)
                        return ret;
        }

        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

        /* Program correct class for RC */
        dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

        dw_pcie_dbi_ro_wr_dis(pci);

        return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
|