Merge tag 'pci-v4.21-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - Remove unused lists from ASPM pcie_link_state (Frederick Lawler)

 - Fix Broadcom CNB20LE host bridge unintended sign extension (Colin Ian
   King)

 - Expand Kconfig "PF" acronyms (Randy Dunlap)

 - Update MAINTAINERS for arch/x86/kernel/early-quirks.c (Bjorn Helgaas)

 - Add missing include to drivers/pci.h (Alexandru Gagniuc)

 - Override Synopsys USB 3.x HAPS device class so dwc3-haps can claim it
   instead of xhci (Thinh Nguyen); see the sketch after this list

 - Clean up P2PDMA documentation (Randy Dunlap)

 - Allow runtime PM even if driver doesn't supply callbacks (Jarkko
   Nikula)

 - Remove status check after submitting Switchtec MRPC Firmware Download
   commands to avoid Completion Timeouts (Kelvin Cao)

 - Set Switchtec coherent DMA mask to allow 64-bit DMA (Boris Glimcher)

 - Fix Switchtec SWITCHTEC_IOCTL_EVENT_IDX_ALL flag overwrite issue
   (Joey Zhang)

 - Enable write combining for Switchtec MRPC Input buffers (Kelvin Cao)

 - Add Switchtec MRPC DMA mode support (Wesley Sheng)

 - Skip VF scanning on s390, which does this in firmware (Sebastian
   Ott)

 - Add Amlogic Meson PCIe controller driver and DT bindings (Yue Wang)

 - Constify histb dw_pcie_host_ops structure (Julia Lawall)

 - Support multiple power domains for imx6 (Leonard Crestez)

 - Constify layerscape driver data (Stefan Agner)

 - Update imx6 Kconfig to allow imx6 PCIe in imx7 kernel (Trent Piepho)

 - Support armada8k GPIO reset (Baruch Siach)

 - Support suspend/resume on imx6 (Leonard Crestez)

 - Don't hard-code DesignWare DBI/ATU offset (Stephen Warren)

 - Skip i.MX6 PHY setup on i.MX7D (Andrey Smirnov)

 - Remove Jianguo Sun from HiSilicon STB maintainers (Lorenzo Pieralisi)

 - Mask DesignWare interrupts instead of disabling them to avoid lost
   interrupts (Marc Zyngier)

 - Add locking when acking DesignWare interrupts (Marc Zyngier)

 - Ack DesignWare interrupts in the proper callbacks (Marc Zyngier)

 - Use devm resource parser in mediatek (Honghui Zhang)

 - Remove unused mediatek "num-lanes" DT property (Honghui Zhang)

 - Add UniPhier PCIe controller driver and DT bindings (Kunihiko
   Hayashi)

 - Enable MSI for imx6 downstream components (Richard Zhu)
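
For reference, the HAPS class override above is implemented as a PCI header
fixup (upstream it lives in drivers/pci/quirks.c; that hunk is not shown on
this page). A minimal sketch, using a placeholder device ID rather than the
exact HAPS IDs the real quirk matches:

#include <linux/pci.h>

/* Placeholder ID for illustration only; the real quirk matches the
 * specific Synopsys HAPS USB3 device IDs. */
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_EXAMPLE	0xabcd

static void quirk_synopsys_haps(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	switch (pdev->device) {
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_EXAMPLE:
		/*
		 * Reclassify from xHCI host controller to USB device
		 * controller so dwc3-haps binds instead of xhci.
		 */
		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
		pci_info(pdev, "PCI class overridden (%#08x -> %#08x)\n",
			 class, pdev->class);
		break;
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
			       PCI_CLASS_SERIAL_USB_XHCI, 0,
			       quirk_synopsys_haps);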

* tag 'pci-v4.21-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (40 commits)
  PCI: imx: Enable MSI from downstream components
  s390/pci: skip VF scanning
  PCI/IOV: Add flag so platforms can skip VF scanning
  PCI/IOV: Factor out sriov_add_vfs()
  PCI: uniphier: Add UniPhier PCIe host controller support
  dt-bindings: PCI: Add UniPhier PCIe host controller description
  PCI: amlogic: Add the Amlogic Meson PCIe controller driver
  dt-bindings: PCI: meson: add DT bindings for Amlogic Meson PCIe controller
  arm64: dts: mt7622: Remove un-used property for PCIe
  arm: dts: mt7623: Remove un-used property for PCIe
  dt-bindings: PCI: MediaTek: Remove un-used property
  PCI: mediatek: Remove un-used variant in struct mtk_pcie_port
  MAINTAINERS: Remove Jianguo Sun from HiSilicon STB DWC entry
  PCI: dwc: Don't hard-code DBI/ATU offset
  PCI: imx: Add imx6sx suspend/resume support
  PCI: armada8k: Add support for gpio controlled reset signal
  PCI: dwc: Adjust Kconfig to allow IMX6 PCIe host on IMX7
  PCI: dwc: layerscape: Constify driver data
  PCI: imx: Add multi-pd support
  PCI: Override Synopsys USB 3.x HAPS device class
  ...
Committed by Linus Torvalds on 2019-01-05 17:57:34 -08:00
37 changed files with 1723 additions and 212 deletions


@@ -89,8 +89,8 @@ config PCI_EXYNOS
select PCIE_DW_HOST
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
depends on SOC_IMX6Q || (ARM && COMPILE_TEST)
bool "Freescale i.MX6/7 PCIe controller"
depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
@@ -193,4 +193,24 @@ config PCIE_HISI_STB
help
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
bool "MESON PCIe controller"
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Amlogic
SoCs. The PCI controller on Amlogic is based on DesignWare hardware
and therefore the driver re-uses the DesignWare core functions to
implement the driver.
config PCIE_UNIPHIER
bool "Socionext UniPhier PCIe controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support on UniPhier SoCs.
This driver supports LD20 and PXs3 SoCs.
endmenu


@@ -14,6 +14,8 @@ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.


@@ -27,6 +27,8 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include "pcie-designware.h"
@@ -59,6 +61,11 @@ struct imx6_pcie {
u32 tx_swing_low;
int link_gen;
struct regulator *vpcie;
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
};
/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
@@ -67,6 +74,7 @@ struct imx6_pcie {
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_IMX6_MSI_CAP 0x50
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
@@ -290,6 +298,43 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 1;
}
static int imx6_pcie_attach_pd(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
if (IS_ERR(imx6_pcie->pd_pcie))
return PTR_ERR(imx6_pcie->pd_pcie);
link = device_link_add(dev, imx6_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
dev_err(dev, "Failed to add device_link to pcie pd.\n");
return -EINVAL;
}
imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pd_pcie_phy))
return PTR_ERR(imx6_pcie->pd_pcie_phy);
device_link_add(dev, imx6_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (IS_ERR(link)) {
dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
return PTR_ERR(link);
}
return 0;
}
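/*
 * Note on imx6_pcie_attach_pd() above: dev_pm_domain_attach_by_name()
 * resolves the "pcie" and "pcie_phy" names against the device's
 * "power-domain-names" DT property, so boards using separate power domains
 * are expected to list both names there.
 */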
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -765,8 +810,28 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
reset_control_assert(imx6_pcie->turnoff_reset);
reset_control_deassert(imx6_pcie->turnoff_reset);
struct device *dev = imx6_pcie->pci->dev;
/* Some variants have a turnoff reset in DT */
if (imx6_pcie->turnoff_reset) {
reset_control_assert(imx6_pcie->turnoff_reset);
reset_control_deassert(imx6_pcie->turnoff_reset);
goto pm_turnoff_sleep;
}
/* Others poke directly at IOMUXC registers */
switch (imx6_pcie->variant) {
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
break;
default:
dev_err(dev, "PME_Turn_Off not implemented\n");
return;
}
/*
* Components with an upstream port must respond to
@@ -775,6 +840,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
* The standard recommends a 1-10ms timeout after which to
* proceed anyway as if acks were received.
*/
pm_turnoff_sleep:
usleep_range(1000, 10000);
}
@@ -784,18 +850,31 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
clk_disable_unprepare(imx6_pcie->pcie_phy);
clk_disable_unprepare(imx6_pcie->pcie_bus);
if (imx6_pcie->variant == IMX7D) {
switch (imx6_pcie->variant) {
case IMX6SX:
clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
break;
case IMX7D:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
default:
break;
}
}
static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
{
return (imx6_pcie->variant == IMX7D ||
imx6_pcie->variant == IMX6SX);
}
static int imx6_pcie_suspend_noirq(struct device *dev)
{
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
if (imx6_pcie->variant != IMX7D)
if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
@@ -811,7 +890,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
struct pcie_port *pp = &imx6_pcie->pci->pp;
if (imx6_pcie->variant != IMX7D)
if (!imx6_pcie_supports_suspend(imx6_pcie))
return 0;
imx6_pcie_assert_core_reset(imx6_pcie);
@@ -840,6 +919,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct resource *dbi_base;
struct device_node *node = dev->of_node;
int ret;
u16 val;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -977,10 +1057,22 @@ static int imx6_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx6_pcie);
ret = imx6_pcie_attach_pd(dev);
if (ret)
return ret;
ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
if (pci_msi_enabled()) {
val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
val);
}
return 0;
}
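
The suspend_noirq/resume_noirq callbacks above are hooked up through noirq
dev_pm_ops in the driver's platform_driver definition; that hookup is not
part of these hunks, but it looks roughly like the sketch below (a trimmed
fragment of pci-imx6.c, with the driver name and match table assumed from
the existing code):

static const struct dev_pm_ops imx6_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
				      imx6_pcie_resume_noirq)
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.pm = &imx6_pcie_pm_ops,
	},
	.probe = imx6_pcie_probe,
};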


@@ -222,12 +222,12 @@ static const struct dw_pcie_ops dw_ls_pcie_ops = {
.link_up = ls_pcie_link_up,
};
static struct ls_pcie_drvdata ls1021_drvdata = {
static const struct ls_pcie_drvdata ls1021_drvdata = {
.ops = &ls1021_pcie_host_ops,
.dw_pcie_ops = &dw_ls1021_pcie_ops,
};
static struct ls_pcie_drvdata ls1043_drvdata = {
static const struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
.lut_dbg = 0x7fc,
@@ -235,7 +235,7 @@ static struct ls_pcie_drvdata ls1043_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
static struct ls_pcie_drvdata ls1046_drvdata = {
static const struct ls_pcie_drvdata ls1046_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 24,
.lut_dbg = 0x407fc,
@@ -243,7 +243,7 @@ static struct ls_pcie_drvdata ls1046_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
static struct ls_pcie_drvdata ls2080_drvdata = {
static const struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x7fc,
@@ -251,7 +251,7 @@ static struct ls_pcie_drvdata ls2080_drvdata = {
.dw_pcie_ops = &dw_ls_pcie_ops,
};
static struct ls_pcie_drvdata ls2088_drvdata = {
static const struct ls_pcie_drvdata ls2088_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
.lut_dbg = 0x407fc,


@@ -0,0 +1,592 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe host controller driver for Amlogic MESON SoCs
*
* Copyright (c) 2018 Amlogic, inc.
* Author: Yue Wang <yue.wang@amlogic.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
/* External local bus interface registers */
#define PLR_OFFSET 0x700
#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
#define FAST_LINK_MODE BIT(7)
#define LINK_CAPABLE_MASK GENMASK(21, 16)
#define LINK_CAPABLE_X1 BIT(16)
#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
#define NUM_OF_LANES_MASK GENMASK(12, 8)
#define NUM_OF_LANES_X1 BIT(8)
#define DIRECT_SPEED_CHANGE BIT(17)
#define TYPE1_HDR_OFFSET 0x0
#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
#define PCI_IO_EN BIT(0)
#define PCI_MEM_SPACE_EN BIT(1)
#define PCI_BUS_MASTER_EN BIT(2)
#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
#define PCIE_CAP_OFFSET 0x70
#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
/* PCIe specific config registers */
#define PCIE_CFG0 0x0
#define APP_LTSSM_ENABLE BIT(7)
#define PCIE_CFG_STATUS12 0x30
#define IS_SMLH_LINK_UP(x) ((x) & (1 << 6))
#define IS_RDLH_LINK_UP(x) ((x) & (1 << 16))
#define IS_LTSSM_UP(x) ((((x) >> 10) & 0x1f) == 0x11)
#define PCIE_CFG_STATUS17 0x44
#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
#define WAIT_LINKUP_TIMEOUT 4000
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
#define MESON_PCIE_PHY_POWERUP 0x1c
#define PCIE_RESET_DELAY 500
#define PCIE_SHARED_RESET 1
#define PCIE_NORMAL_RESET 0
enum pcie_data_rate {
PCIE_GEN1,
PCIE_GEN2,
PCIE_GEN3,
PCIE_GEN4
};
struct meson_pcie_mem_res {
void __iomem *elbi_base;
void __iomem *cfg_base;
void __iomem *phy_base;
};
struct meson_pcie_clk_res {
struct clk *clk;
struct clk *mipi_gate;
struct clk *port_clk;
struct clk *general_clk;
};
struct meson_pcie_rc_reset {
struct reset_control *phy;
struct reset_control *port;
struct reset_control *apb;
};
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
const char *id,
u32 reset_type)
{
struct device *dev = mp->pci.dev;
struct reset_control *reset;
if (reset_type == PCIE_SHARED_RESET)
reset = devm_reset_control_get_shared(dev, id);
else
reset = devm_reset_control_get(dev, id);
return reset;
}
static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
if (IS_ERR(mrst->phy))
return PTR_ERR(mrst->phy);
reset_control_deassert(mrst->phy);
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
return PTR_ERR(mrst->port);
reset_control_deassert(mrst->port);
mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
if (IS_ERR(mrst->apb))
return PTR_ERR(mrst->apb);
reset_control_deassert(mrst->apb);
return 0;
}
static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
struct meson_pcie *mp,
const char *id)
{
struct device *dev = mp->pci.dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
return devm_ioremap_resource(dev, res);
}
static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
struct meson_pcie *mp,
const char *id)
{
struct device *dev = mp->pci.dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
if (!res) {
dev_err(dev, "No REG resource %s\n", id);
return ERR_PTR(-ENXIO);
}
return devm_ioremap(dev, res->start, resource_size(res));
}
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
if (IS_ERR(mp->mem_res.elbi_base))
return PTR_ERR(mp->mem_res.elbi_base);
mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
/* The Meson SoC has two PCIe controllers that share the same PHY registers */
mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
if (IS_ERR(mp->mem_res.phy_base))
return PTR_ERR(mp->mem_res.phy_base);
return 0;
}
static void meson_pcie_power_on(struct meson_pcie *mp)
{
writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
}
static void meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
reset_control_assert(mrst->phy);
udelay(PCIE_RESET_DELAY);
reset_control_deassert(mrst->phy);
udelay(PCIE_RESET_DELAY);
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
udelay(PCIE_RESET_DELAY);
reset_control_deassert(mrst->port);
reset_control_deassert(mrst->apb);
udelay(PCIE_RESET_DELAY);
}
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
const char *id, u64 rate)
{
struct clk *clk;
int ret;
clk = devm_clk_get(dev, id);
if (IS_ERR(clk))
return clk;
if (rate) {
ret = clk_set_rate(clk, rate);
if (ret) {
dev_err(dev, "set clk rate failed, ret = %d\n", ret);
return ERR_PTR(ret);
}
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(dev, "couldn't enable clk\n");
return ERR_PTR(ret);
}
devm_add_action_or_reset(dev,
(void (*) (void *))clk_disable_unprepare,
clk);
return clk;
}
static int meson_pcie_probe_clocks(struct meson_pcie *mp)
{
struct device *dev = mp->pci.dev;
struct meson_pcie_clk_res *res = &mp->clk_res;
res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
if (IS_ERR(res->mipi_gate))
return PTR_ERR(res->mipi_gate);
res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
return 0;
}
static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
writel(val, mp->mem_res.elbi_base + reg);
}
static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
{
return readl(mp->mem_res.elbi_base + reg);
}
static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
return readl(mp->mem_res.cfg_base + reg);
}
static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
writel(val, mp->mem_res.cfg_base + reg);
}
static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
gpiod_set_value_cansleep(mp->reset_gpio, 0);
udelay(500);
gpiod_set_value_cansleep(mp->reset_gpio, 1);
}
static void meson_pcie_init_dw(struct meson_pcie *mp)
{
u32 val;
val = meson_cfg_readl(mp, PCIE_CFG0);
val |= APP_LTSSM_ENABLE;
meson_cfg_writel(mp, val, PCIE_CFG0);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
val &= ~LINK_CAPABLE_MASK;
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
val &= ~NUM_OF_LANES_MASK;
meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}
static int meson_size_to_payload(struct meson_pcie *mp, int size)
{
struct device *dev = mp->pci.dev;
/*
 * The DWC core encodes the max payload size as 2^(val + 7), where val is
 * 0..5 and defaults to 1. If the requested size is not a power of two,
 * or is smaller than 2^7 or larger than 2^12, fall back to the default
 * of 2^(1 + 7) = 256 bytes.
 */
if (!is_power_of_2(size) || size < 128 || size > 4096) {
dev_warn(dev, "payload size %d, set to default 256\n", size);
return 1;
}
return fls(size) - 8;
}
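/*
 * Worked examples for meson_size_to_payload() above: 128 -> fls(128) - 8 = 0,
 * 256 -> 1, 4096 -> 5; a non-power-of-two value such as 300, or anything
 * outside 128..4096, falls back to the default field value of 1 (256 bytes).
 */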
static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
u32 val;
int max_payload_size = meson_size_to_payload(mp, size);
val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
}
static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
u32 val;
int max_rd_req_size = meson_size_to_payload(mp, size);
val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
}
static inline void meson_enable_memory_space(struct meson_pcie *mp)
{
/* Set the RC Bus Master, Memory Space and I/O Space enables */
meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
PCIE_STATUS_COMMAND);
}
static int meson_pcie_establish_link(struct meson_pcie *mp)
{
struct dw_pcie *pci = &mp->pci;
struct pcie_port *pp = &pci->pp;
meson_pcie_init_dw(mp);
meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
dw_pcie_setup_rc(pp);
meson_enable_memory_space(mp);
meson_pcie_assert_reset(mp);
return dw_pcie_wait_for_link(pci);
}
static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
{
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(&mp->pci.pp);
}
static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret;
ret = dw_pcie_read(pci->dbi_base + where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
/*
* There is a bug in the MESON AXG PCIe controller whereby software
* cannot program the PCI_CLASS_DEVICE register, so we must fabricate
* the return value in the config accessors.
*/
if (where == PCI_CLASS_REVISION && size == 4)
*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
else if (where == PCI_CLASS_DEVICE && size == 2)
*val = PCI_CLASS_BRIDGE_PCI;
else if (where == PCI_CLASS_DEVICE && size == 1)
*val = PCI_CLASS_BRIDGE_PCI & 0xff;
else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
return PCIBIOS_SUCCESSFUL;
}
static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
int size, u32 val)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
return dw_pcie_write(pci->dbi_base + where, size, val);
}
static int meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
u32 speed_okay = 0;
u32 cnt = 0;
u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
do {
state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
smlh_up = IS_SMLH_LINK_UP(state12);
rdlh_up = IS_RDLH_LINK_UP(state12);
ltssm_up = IS_LTSSM_UP(state12);
if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
speed_okay = 1;
if (smlh_up)
dev_dbg(dev, "smlh_link_up is on\n");
if (rdlh_up)
dev_dbg(dev, "rdlh_link_up is on\n");
if (ltssm_up)
dev_dbg(dev, "ltssm_up is on\n");
if (speed_okay)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
return 1;
cnt++;
udelay(10);
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
return 0;
}
static int meson_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
int ret;
ret = meson_pcie_establish_link(mp);
if (ret)
return ret;
meson_pcie_enable_interrupts(mp);
return 0;
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
.rd_own_conf = meson_pcie_rd_own_conf,
.wr_own_conf = meson_pcie_wr_own_conf,
.host_init = meson_pcie_host_init,
};
static int meson_add_pcie_port(struct meson_pcie *mp,
struct platform_device *pdev)
{
struct dw_pcie *pci = &mp->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
if (pp->msi_irq < 0) {
dev_err(dev, "failed to get MSI IRQ\n");
return pp->msi_irq;
}
}
pp->ops = &meson_pcie_host_ops;
pci->dbi_base = mp->mem_res.elbi_base;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
}
return 0;
}
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = meson_pcie_link_up,
};
static int meson_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
int ret;
mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
if (!mp)
return -ENOMEM;
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(mp->reset_gpio)) {
dev_err(dev, "get reset gpio failed\n");
return PTR_ERR(mp->reset_gpio);
}
ret = meson_pcie_get_resets(mp);
if (ret) {
dev_err(dev, "get reset resource failed, %d\n", ret);
return ret;
}
ret = meson_pcie_get_mems(pdev, mp);
if (ret) {
dev_err(dev, "get memory resource failed, %d\n", ret);
return ret;
}
meson_pcie_power_on(mp);
meson_pcie_reset(mp);
ret = meson_pcie_probe_clocks(mp);
if (ret) {
dev_err(dev, "init clock resources failed, %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, mp);
ret = meson_add_pcie_port(mp, pdev);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
},
{},
};
static struct platform_driver meson_pcie_driver = {
.probe = meson_pcie_probe,
.driver = {
.name = "meson-pcie",
.of_match_table = meson_pcie_of_match,
},
};
builtin_platform_driver(meson_pcie_driver);


@@ -22,6 +22,7 @@
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include <linux/gpio/consumer.h>
#include "pcie-designware.h"
@@ -29,6 +30,7 @@ struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
struct gpio_desc *reset_gpio;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -137,6 +139,12 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
if (pcie->reset_gpio) {
/* assert and then deassert the reset signal */
gpiod_set_value_cansleep(pcie->reset_gpio, 1);
msleep(100);
gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pcie);
@@ -249,6 +257,14 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail_clkreg;
}
/* Get reset gpio signal and hold asserted (logically high) */
pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(pcie->reset_gpio)) {
ret = PTR_ERR(pcie->reset_gpio);
goto fail_clkreg;
}
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);


@@ -503,6 +503,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
return -EINVAL;
}
if (pci->iatu_unroll_enabled && !pci->atu_base) {
dev_err(dev, "atu_base is not populated\n");
return -EINVAL;
}
ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
if (ret < 0) {


@@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
(i * MAX_MSI_IRQS_PER_CTRL) +
pos);
generic_handle_irq(irq);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
4, 1 << pos);
pos++;
}
}
@@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data)
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
pp->irq_status[ctrl]);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_status[ctrl] |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
pp->irq_status[ctrl]);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
~pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct msi_desc *msi = irq_data_get_msi_desc(d);
struct pcie_port *pp;
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
pp = msi_desc_to_pci_sysdata(msi);
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
raw_spin_lock_irqsave(&pp->lock, flags);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
4, &pp->irq_status[ctrl]);
4, ~0);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
4, ~0);
pp->irq_status[ctrl] = 0;
}
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
@@ -699,6 +710,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
if (pci->iatu_unroll_enabled && !pci->atu_base)
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);


@@ -93,7 +93,7 @@ static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
return dw_pcie_readl_dbi(pci, offset + reg);
return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -101,7 +101,7 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
dw_pcie_writel_dbi(pci, offset + reg, val);
dw_pcie_writel_atu(pci, offset + reg, val);
}
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
@@ -187,7 +187,7 @@ static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
return dw_pcie_readl_dbi(pci, offset + reg);
return dw_pcie_readl_atu(pci, offset + reg);
}
static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
@@ -195,7 +195,7 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
dw_pcie_writel_dbi(pci, offset + reg, val);
dw_pcie_writel_atu(pci, offset + reg, val);
}
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,


@@ -92,12 +92,20 @@
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9))
/*
 * The default address offset between dbi_base and atu_base. Root controller
 * drivers are not required to initialize atu_base if the offset matches this
 * default; the core automatically derives atu_base from dbi_base using this
 * offset when atu_base is not set.
 */
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9) | (0x1 << 8))
/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
((region) << 9)
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
(((region) << 9) | (0x1 << 8))
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -219,6 +227,8 @@ struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
/* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
u32 num_viewport;
u8 iatu_unroll_enabled;
struct pcie_port pp;
@@ -289,6 +299,16 @@ static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
__dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
}
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
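
The new atu_base field above lets a glue driver whose iATU registers do not
sit at dbi_base + DEFAULT_DBI_ATU_OFFSET map them explicitly before the core
takes over. A minimal hypothetical probe fragment (the "atu" resource name
and the surrounding variables are assumptions, not taken from this series):

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
	pci->atu_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);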


@@ -202,7 +202,7 @@ static int histb_pcie_host_init(struct pcie_port *pp)
return 0;
}
static struct dw_pcie_host_ops histb_pcie_host_ops = {
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
.rd_own_conf = histb_pcie_rd_own_conf,
.wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,


@@ -0,0 +1,471 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe host controller driver for UniPhier SoCs
* Copyright 2018 Socionext Inc.
* Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
*/
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "pcie-designware.h"
#define PCL_PINCTRL0 0x002c
#define PCL_PERST_PLDN_REGEN BIT(12)
#define PCL_PERST_NOE_REGEN BIT(11)
#define PCL_PERST_OUT_REGEN BIT(8)
#define PCL_PERST_PLDN_REGVAL BIT(4)
#define PCL_PERST_NOE_REGVAL BIT(3)
#define PCL_PERST_OUT_REGVAL BIT(0)
#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)
#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)
#define PCL_APP_PM0 0x8078
#define PCL_SYS_AUX_PWR_DET BIT(8)
#define PCL_RCV_INT 0x8108
#define PCL_RCV_INT_ALL_ENABLE GENMASK(20, 17)
#define PCL_CFG_BW_MGT_STATUS BIT(4)
#define PCL_CFG_LINK_AUTO_BW_STATUS BIT(3)
#define PCL_CFG_AER_RC_ERR_MSI_STATUS BIT(2)
#define PCL_CFG_PME_MSI_STATUS BIT(1)
#define PCL_RCV_INTX 0x810c
#define PCL_RCV_INTX_ALL_ENABLE GENMASK(19, 16)
#define PCL_RCV_INTX_ALL_MASK GENMASK(11, 8)
#define PCL_RCV_INTX_MASK_SHIFT 8
#define PCL_RCV_INTX_ALL_STATUS GENMASK(3, 0)
#define PCL_RCV_INTX_STATUS_SHIFT 0
#define PCL_STATUS_LINK 0x8140
#define PCL_RDLH_LINK_UP BIT(1)
#define PCL_XMLH_LINK_UP BIT(0)
struct uniphier_pcie_priv {
void __iomem *base;
struct dw_pcie pci;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
struct irq_domain *legacy_irq_domain;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
bool enable)
{
u32 val;
val = readl(priv->base + PCL_APP_READY_CTRL);
if (enable)
val |= PCL_APP_LTSSM_ENABLE;
else
val &= ~PCL_APP_LTSSM_ENABLE;
writel(val, priv->base + PCL_APP_READY_CTRL);
}
static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
{
u32 val;
/* use auxiliary power detection */
val = readl(priv->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
writel(val, priv->base + PCL_APP_PM0);
/* assert PERST# */
val = readl(priv->base + PCL_PINCTRL0);
val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
| PCL_PERST_PLDN_REGVAL);
val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
| PCL_PERST_PLDN_REGEN;
writel(val, priv->base + PCL_PINCTRL0);
uniphier_pcie_ltssm_enable(priv, false);
usleep_range(100000, 200000);
/* deassert PERST# */
val = readl(priv->base + PCL_PINCTRL0);
val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
writel(val, priv->base + PCL_PINCTRL0);
}
static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
{
u32 status;
int ret;
/* wait PIPE clock */
ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
status & PCL_PCLK_ALIVE, 100000, 1000000);
if (ret) {
dev_err(priv->pci.dev,
"Failed to initialize controller in RC mode\n");
return ret;
}
return 0;
}
static int uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
u32 val, mask;
val = readl(priv->base + PCL_STATUS_LINK);
mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
return (val & mask) == mask;
}
static int uniphier_pcie_establish_link(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
if (dw_pcie_link_up(pci))
return 0;
uniphier_pcie_ltssm_enable(priv, true);
return dw_pcie_wait_for_link(pci);
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
uniphier_pcie_ltssm_enable(priv, false);
}
static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
{
writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}
static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
{
writel(0, priv->base + PCL_RCV_INT);
writel(0, priv->base + PCL_RCV_INTX);
}
static void uniphier_pcie_irq_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
u32 val;
val = readl(priv->base + PCL_RCV_INTX);
val &= ~PCL_RCV_INTX_ALL_STATUS;
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
}
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
u32 val;
val = readl(priv->base + PCL_RCV_INTX);
val &= ~PCL_RCV_INTX_ALL_MASK;
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
}
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
u32 val;
val = readl(priv->base + PCL_RCV_INTX);
val &= ~PCL_RCV_INTX_ALL_MASK;
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
}
static struct irq_chip uniphier_pcie_irq_chip = {
.name = "PCI",
.irq_ack = uniphier_pcie_irq_ack,
.irq_mask = uniphier_pcie_irq_mask,
.irq_unmask = uniphier_pcie_irq_unmask,
};
static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops uniphier_intx_domain_ops = {
.map = uniphier_pcie_intx_map,
};
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
struct pcie_port *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
u32 val, bit, virq;
/* INT for debug */
val = readl(priv->base + PCL_RCV_INT);
if (val & PCL_CFG_BW_MGT_STATUS)
dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
dev_dbg(pci->dev, "Root Error\n");
if (val & PCL_CFG_PME_MSI_STATUS)
dev_dbg(pci->dev, "PME Interrupt\n");
writel(val, priv->base + PCL_RCV_INT);
/* INTx */
chained_irq_enter(chip, desc);
val = readl(priv->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
}
static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!np_intc) {
dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
return -EINVAL;
}
pp->irq = irq_of_parse_and_map(np_intc, 0);
if (!pp->irq) {
dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
return -EINVAL;
}
priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!priv->legacy_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
return -ENODEV;
}
irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
pp);
return 0;
}
static int uniphier_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
int ret;
ret = uniphier_pcie_config_legacy_irq(pp);
if (ret)
return ret;
uniphier_pcie_irq_enable(priv);
dw_pcie_setup_rc(pp);
ret = uniphier_pcie_establish_link(pci);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
return 0;
}
static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
struct platform_device *pdev)
{
struct dw_pcie *pci = &priv->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
pp->ops = &uniphier_pcie_host_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq < 0)
return pp->msi_irq;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "Failed to initialize host (%d)\n", ret);
return ret;
}
return 0;
}
static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
int ret;
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
ret = reset_control_deassert(priv->rst);
if (ret)
goto out_clk_disable;
uniphier_pcie_init_rc(priv);
ret = phy_init(priv->phy);
if (ret)
goto out_rst_assert;
ret = uniphier_pcie_wait_rc(priv);
if (ret)
goto out_phy_exit;
return 0;
out_phy_exit:
phy_exit(priv->phy);
out_rst_assert:
reset_control_assert(priv->rst);
out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
{
uniphier_pcie_irq_disable(priv);
phy_exit(priv->phy);
reset_control_assert(priv->rst);
clk_disable_unprepare(priv->clk);
}
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = uniphier_pcie_establish_link,
.stop_link = uniphier_pcie_stop_link,
.link_up = uniphier_pcie_link_up,
};
static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_priv *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(priv->pci.dbi_base))
return PTR_ERR(priv->pci.dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
priv->rst = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
priv->phy = devm_phy_optional_get(dev, "pcie-phy");
if (IS_ERR(priv->phy))
return PTR_ERR(priv->phy);
platform_set_drvdata(pdev, priv);
ret = uniphier_pcie_host_enable(priv);
if (ret)
return ret;
return uniphier_add_pcie_port(priv, pdev);
}
static int uniphier_pcie_remove(struct platform_device *pdev)
{
struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
uniphier_pcie_host_disable(priv);
return 0;
}
static const struct of_device_id uniphier_pcie_match[] = {
{ .compatible = "socionext,uniphier-pcie", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
static struct platform_driver uniphier_pcie_driver = {
.probe = uniphier_pcie_probe,
.remove = uniphier_pcie_remove,
.driver = {
.name = "uniphier-pcie",
.of_match_table = uniphier_pcie_match,
},
};
builtin_platform_driver(uniphier_pcie_driver);
MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
MODULE_LICENSE("GPL v2");