Merge tag 'pci-v5.6-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:

 "Resource management:
   - Improve resource assignment for hot-added nested bridges, e.g.,
     Thunderbolt (Nicholas Johnson)

  Power management:
   - Optionally print config space of devices before suspend (Chen Yu)
   - Increase D3 delay for AMD Ryzen5/7 XHCI controllers (Daniel Drake)

  Virtualization:
   - Generalize DMA alias quirks (James Sewart)
   - Add DMA alias quirk for PLX PEX NTB (James Sewart)
   - Fix IOV memory leak (Navid Emamdoost)

  AER:
   - Log which device prevents error recovery (Yicong Yang)

  Peer-to-peer DMA:
   - Whitelist Intel SkyLake-E (Armen Baloyan)

  Broadcom iProc host bridge driver:
   - Apply PAXC quirk whether driver is built-in or module (Wei Liu)

  Broadcom STB host bridge driver:
   - Add Broadcom STB PCIe host controller driver (Jim Quinlan)

  Intel Gateway SoC host bridge driver:
   - Add driver for Intel Gateway SoC (Dilip Kota)

  Intel VMD host bridge driver:
   - Add support for DMA aliases on other buses (Jon Derrick)
   - Remove dma_map_ops overrides (Jon Derrick)
   - Remove now-unused X86_DEV_DMA_OPS (Christoph Hellwig)

  NVIDIA Tegra host bridge driver:
   - Fix Tegra30 afi_pex2_ctrl register offset (Marcel Ziswiler)

  Panasonic UniPhier host bridge driver:
   - Remove module code since driver can't be built as a module
     (Masahiro Yamada)

  Qualcomm host bridge driver:
   - Add support for SDM845 PCIe controller (Bjorn Andersson)

  TI Keystone host bridge driver:
   - Fix "num-viewport" DT property error handling (Kishon Vijay Abraham I)
   - Fix link training retries initiation (Yurii Monakov)
   - Fix outbound region mapping (Yurii Monakov)

  Misc:
   - Add Switchtec Gen4 support (Kelvin Cao)
   - Add Switchtec Intercomm Notify and Upstream Error Containment
     support (Logan Gunthorpe)
   - Use dma_set_mask_and_coherent() since Switchtec supports 64-bit
     addressing (Wesley Sheng)"

* tag 'pci-v5.6-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (60 commits)
  PCI: Allow adjust_bridge_window() to shrink resource if necessary
  PCI: Set resource size directly in adjust_bridge_window()
  PCI: Rename extend_bridge_window() to adjust_bridge_window()
  PCI: Rename extend_bridge_window() parameter
  PCI: Consider alignment of hot-added bridges when assigning resources
  PCI: Remove local variable usage in pci_bus_distribute_available_resources()
  PCI: Pass size + alignment to pci_bus_distribute_available_resources()
  PCI: Rename variables
  PCI: vmd: Add two VMD Device IDs
  PCI: Remove unnecessary braces
  PCI: brcmstb: Add MSI support
  PCI: brcmstb: Add Broadcom STB PCIe host controller driver
  x86/PCI: Remove X86_DEV_DMA_OPS
  PCI: vmd: Remove dma_map_ops overrides
  iommu/vt-d: Remove VMD child device sanity check
  iommu/vt-d: Use pci_real_dma_dev() for mapping
  PCI: Introduce pci_real_dma_dev()
  x86/PCI: Expose VMD's pci_dev in struct pci_sysdata
  x86/PCI: Add to_pci_sysdata() helper
  PCI/AER: Initialize aer_fifo
  ...
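Among the changes above, the VMD rework replaces the old dma_map_ops overrides with pci_real_dma_dev(), so that IOMMU and arch code can map DMA through the device that actually issues it. A minimal sketch of the idea (only pci_real_dma_dev() itself comes from this series; the wrapper below is illustrative):

#include <linux/pci.h>

/*
 * Illustrative wrapper only: return the device whose requester ID and
 * IOMMU context should be used for DMA mappings. For an endpoint
 * behind VMD this resolves to the VMD controller itself.
 */
static struct pci_dev *example_dma_owner(struct pci_dev *pdev)
{
        return pci_real_dma_dev(pdev);
}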
@@ -209,6 +209,17 @@ config PCIE_ARTPEC6_EP
          Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
          endpoint mode. This uses the DesignWare core.

config PCIE_INTEL_GW
        bool "Intel Gateway PCIe host controller support"
        depends on OF && (X86 || COMPILE_TEST)
        depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW_HOST
        help
          Say 'Y' here to enable PCIe Host controller support on Intel
          Gateway SoCs.
          The PCIe controller uses the DesignWare core plus Intel-specific
          hardware wrappers.

config PCIE_KIRIN
        depends on OF && (ARM64 || COMPILE_TEST)
        bool "HiSilicon Kirin series SoCs PCIe controllers"
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Samsung EXYNOS SoCs
 * PCIe host controller driver for Samsung Exynos SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
@@ -422,7 +422,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
                                  lower_32_bits(start) | OB_ENABLEN);
                ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
                                  upper_32_bits(start));
                start += OB_WIN_SIZE;
                start += OB_WIN_SIZE * SZ_1M;
        }

        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);

@@ -510,7 +510,7 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
        /* Disable Link training */
        val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
        val &= ~LTSSM_EN_VAL;
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
        ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}

static int ks_pcie_start_link(struct dw_pcie *pci)

@@ -1354,7 +1354,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
        ret = of_property_read_u32(np, "num-viewport", &num_viewport);
        if (ret < 0) {
                dev_err(dev, "unable to read *num-viewport* property\n");
                return ret;
                goto err_get_sync;
        }

        /*
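For context on the outbound-window fix in the first keystone hunk above: the apparent reason for the added SZ_1M factor is that OB_WIN_SIZE is counted in megabytes, so successive outbound windows must advance by OB_WIN_SIZE * SZ_1M bytes, not by the raw count. A small illustrative sketch of that arithmetic (the helper name and its use are hypothetical, not part of the diff):

#include <linux/sizes.h>
#include <linux/types.h>

/*
 * Hypothetical helper, for illustration only: byte offset of outbound
 * window i when ob_win_size_mb is a per-window size in megabytes.
 */
static u64 example_ob_win_offset(unsigned int i, u32 ob_win_size_mb)
{
        return (u64)i * ob_win_size_mb * SZ_1M;
}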
@@ -51,9 +51,6 @@ static const struct of_device_id artpec6_pcie_of_match[];
#define ACK_N_FTS_MASK                  GENMASK(15, 8)
#define ACK_N_FTS(x)                    (((x) << 8) & ACK_N_FTS_MASK)

#define FAST_TRAINING_SEQ_MASK          GENMASK(7, 0)
#define FAST_TRAINING_SEQ(x)            (((x) << 0) & FAST_TRAINING_SEQ_MASK)

/* ARTPEC-6 specific registers */
#define PCIECFG                         0x18
#define  PCIECFG_DBG_OEN                BIT(24)

@@ -313,10 +310,7 @@ static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
         * Set the Number of Fast Training Sequences that the core
         * advertises as its N_FTS during Gen2 or Gen3 link training.
         */
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~FAST_TRAINING_SEQ_MASK;
        val |= FAST_TRAINING_SEQ(180);
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
        dw_pcie_link_set_n_fts(pci, 180);
}

static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*

@@ -474,6 +475,61 @@ int dw_pcie_link_up(struct dw_pcie *pci)
                (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
        val |= PORT_MLTI_UPCFG_SUPPORT;
        dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
        u32 reg, val;
        u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

        reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
        reg &= ~PCI_EXP_LNKCTL2_TLS;

        switch (pcie_link_speed[link_gen]) {
        case PCIE_SPEED_2_5GT:
                reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
                break;
        case PCIE_SPEED_5_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
                break;
        case PCIE_SPEED_8_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
                break;
        case PCIE_SPEED_16_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
                break;
        default:
                /* Use hardware capability */
                val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
                val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
                reg &= ~PCI_EXP_LNKCTL2_HASD;
                reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
                break;
        }

        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);

void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_N_FTS_MASK;
        val |= n_fts & PORT_LOGIC_N_FTS_MASK;
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;
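The three helpers added above are intended to be called from DesignWare glue drivers during root-complex bring-up; the Intel Gateway driver added later in this series uses them exactly that way. A minimal sketch of such a caller, assuming the declarations from pcie-designware.h below (the function and parameter names here are illustrative, not part of the diff):

/* Minimal sketch of a glue-driver caller; names are illustrative. */
static void example_rc_link_setup(struct dw_pcie *pci, u32 link_gen, u32 n_fts)
{
        /* Advertise upconfigure support (PORT_MLTI_UPCFG_SUPPORT) */
        dw_pcie_upconfig_setup(pci);

        /*
         * Cap the target link speed; a gen value that does not map to
         * Gen1-Gen4 falls back to the LNKCAP maximum via the default:
         * branch in dw_pcie_link_set_max_speed() above.
         */
        dw_pcie_link_set_max_speed(pci, link_gen);

        /* Program the N_FTS value advertised during link training */
        dw_pcie_link_set_n_fts(pci, n_fts);
}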
@@ -30,7 +30,12 @@
#define LINK_WAIT_IATU                  9

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_AFR                   0x70C
#define PORT_AFR_N_FTS_MASK             GENMASK(15, 8)
#define PORT_AFR_CC_N_FTS_MASK          GENMASK(23, 16)

#define PCIE_PORT_LINK_CONTROL          0x710
#define PORT_LINK_DLL_LINK_EN           BIT(5)
#define PORT_LINK_MODE_MASK             GENMASK(21, 16)
#define PORT_LINK_MODE(n)               FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES          PORT_LINK_MODE(0x1)

@@ -46,6 +51,7 @@
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING       BIT(29)

#define PCIE_LINK_WIDTH_SPEED_CONTROL   0x80C
#define PORT_LOGIC_N_FTS_MASK           GENMASK(7, 0)
#define PORT_LOGIC_SPEED_CHANGE         BIT(17)
#define PORT_LOGIC_LINK_WIDTH_MASK      GENMASK(12, 8)
#define PORT_LOGIC_LINK_WIDTH(n)        FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)

@@ -60,6 +66,9 @@
#define PCIE_MSI_INTR0_MASK             0x82C
#define PCIE_MSI_INTR0_STATUS           0x830

#define PCIE_PORT_MULTI_LANE_CTRL       0x8C0
#define PORT_MLTI_UPCFG_SUPPORT         BIT(7)

#define PCIE_ATU_VIEWPORT               0x900
#define PCIE_ATU_REGION_INBOUND         BIT(31)
#define PCIE_ATU_REGION_OUTBOUND        0

@@ -273,6 +282,9 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen);
void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
                               int type, u64 cpu_addr, u64 pci_addr,
drivers/pci/controller/dwc/pcie-intel-gw.c (new file, 545 lines)
@@ -0,0 +1,545 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Intel Gateway SoCs
 *
 * Copyright (c) 2019 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/pci_regs.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "../../pci.h"
#include "pcie-designware.h"

#define PORT_AFR_N_FTS_GEN12_DFT        (SZ_128 - 1)
#define PORT_AFR_N_FTS_GEN3             180
#define PORT_AFR_N_FTS_GEN4             196

/* PCIe Application logic Registers */
#define PCIE_APP_CCR                    0x10
#define PCIE_APP_CCR_LTSSM_ENABLE       BIT(0)

#define PCIE_APP_MSG_CR                 0x30
#define PCIE_APP_MSG_XMT_PM_TURNOFF     BIT(0)

#define PCIE_APP_PMC                    0x44
#define PCIE_APP_PMC_IN_L2              BIT(20)

#define PCIE_APP_IRNEN                  0xF4
#define PCIE_APP_IRNCR                  0xF8
#define PCIE_APP_IRN_AER_REPORT         BIT(0)
#define PCIE_APP_IRN_PME                BIT(2)
#define PCIE_APP_IRN_RX_VDM_MSG         BIT(4)
#define PCIE_APP_IRN_PM_TO_ACK          BIT(9)
#define PCIE_APP_IRN_LINK_AUTO_BW_STAT  BIT(11)
#define PCIE_APP_IRN_BW_MGT             BIT(12)
#define PCIE_APP_IRN_MSG_LTR            BIT(18)
#define PCIE_APP_IRN_SYS_ERR_RC         BIT(29)
#define PCIE_APP_INTX_OFST              12

#define PCIE_APP_IRN_INT \
        (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
        PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
        PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
        PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
        (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTA) | \
        (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTB) | \
        (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTC) | \
        (PCIE_APP_INTX_OFST + PCI_INTERRUPT_INTD))

#define BUS_IATU_OFFSET                 SZ_256M
#define RESET_INTERVAL_MS               100

struct intel_pcie_soc {
        unsigned int    pcie_ver;
        unsigned int    pcie_atu_offset;
        u32             num_viewport;
};

struct intel_pcie_port {
        struct dw_pcie          pci;
        void __iomem            *app_base;
        struct gpio_desc        *reset_gpio;
        u32                     rst_intrvl;
        u32                     max_speed;
        u32                     link_gen;
        u32                     max_width;
        u32                     n_fts;
        struct clk              *core_clk;
        struct reset_control    *core_rst;
        struct phy              *phy;
        u8                      pcie_cap_ofst;
};

static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
{
        u32 old;

        old = readl(base + ofs);
        val = (old & ~mask) | (val & mask);

        if (val != old)
                writel(val, base + ofs);
}

static inline u32 pcie_app_rd(struct intel_pcie_port *lpp, u32 ofs)
{
        return readl(lpp->app_base + ofs);
}

static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
        writel(val, lpp->app_base + ofs);
}

static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
                             u32 mask, u32 val)
{
        pcie_update_bits(lpp->app_base, ofs, mask, val);
}

static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
{
        return dw_pcie_readl_dbi(&lpp->pci, ofs);
}

static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
{
        dw_pcie_writel_dbi(&lpp->pci, ofs, val);
}

static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
                                u32 mask, u32 val)
{
        pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
}

static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
{
        pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
                         PCIE_APP_CCR_LTSSM_ENABLE);
}

static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
{
        pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}

static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
        u32 val;
        u8 offset = lpp->pcie_cap_ofst;

        val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
        lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
        lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);

        val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);

        val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
        pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}

static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
{
        u32 val, mask;

        switch (pcie_link_speed[lpp->max_speed]) {
        case PCIE_SPEED_8_0GT:
                lpp->n_fts = PORT_AFR_N_FTS_GEN3;
                break;
        case PCIE_SPEED_16_0GT:
                lpp->n_fts = PORT_AFR_N_FTS_GEN4;
                break;
        default:
                lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
                break;
        }

        mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
        val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
              FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
        pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);

        /* Port Link Control Register */
        pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
                            PORT_LINK_DLL_LINK_EN);
}

static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
{
        intel_pcie_ltssm_disable(lpp);
        intel_pcie_link_setup(lpp);
        dw_pcie_setup_rc(&lpp->pci.pp);
        dw_pcie_upconfig_setup(&lpp->pci);
        intel_pcie_port_logic_setup(lpp);
        dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen);
        dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
}

static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
{
        struct device *dev = lpp->pci.dev;
        int ret;

        lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(lpp->reset_gpio)) {
                ret = PTR_ERR(lpp->reset_gpio);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
                return ret;
        }

        /* Make initial reset last for 100us */
        usleep_range(100, 200);

        return 0;
}

static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
{
        reset_control_assert(lpp->core_rst);
}

static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
{
        /*
         * One micro-second delay to make sure the reset pulse
         * wide enough so that core reset is clean.
         */
        udelay(1);
        reset_control_deassert(lpp->core_rst);

        /*
         * Some SoC core reset also reset PHY, more delay needed
         * to make sure the reset process is done.
         */
        usleep_range(1000, 2000);
}

static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
{
        gpiod_set_value_cansleep(lpp->reset_gpio, 1);
}

static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
{
        msleep(lpp->rst_intrvl);
        gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}

static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
{
        intel_pcie_device_rst_deassert(lpp);
        intel_pcie_ltssm_enable(lpp);

        return dw_pcie_wait_for_link(&lpp->pci);
}

static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
        pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
        pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}

static int intel_pcie_get_resources(struct platform_device *pdev)
{
        struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
        struct dw_pcie *pci = &lpp->pci;
        struct device *dev = pci->dev;
        struct resource *res;
        int ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        pci->dbi_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pci->dbi_base))
                return PTR_ERR(pci->dbi_base);

        lpp->core_clk = devm_clk_get(dev, NULL);
        if (IS_ERR(lpp->core_clk)) {
                ret = PTR_ERR(lpp->core_clk);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get clks: %d\n", ret);
                return ret;
        }

        lpp->core_rst = devm_reset_control_get(dev, NULL);
        if (IS_ERR(lpp->core_rst)) {
                ret = PTR_ERR(lpp->core_rst);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get resets: %d\n", ret);
                return ret;
        }

        ret = device_property_match_string(dev, "device_type", "pci");
        if (ret) {
                dev_err(dev, "Failed to find pci device type: %d\n", ret);
                return ret;
        }

        ret = device_property_read_u32(dev, "reset-assert-ms",
                                       &lpp->rst_intrvl);
        if (ret)
                lpp->rst_intrvl = RESET_INTERVAL_MS;

        ret = of_pci_get_max_link_speed(dev->of_node);
        lpp->link_gen = ret < 0 ? 0 : ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
        lpp->app_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(lpp->app_base))
                return PTR_ERR(lpp->app_base);

        lpp->phy = devm_phy_get(dev, "pcie");
        if (IS_ERR(lpp->phy)) {
                ret = PTR_ERR(lpp->phy);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
                return ret;
        }

        return 0;
}

static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
{
        phy_exit(lpp->phy);
}

static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
        u32 value;
        int ret;

        if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT)
                return 0;

        /* Send PME_TURN_OFF message */
        pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
                         PCIE_APP_MSG_XMT_PM_TURNOFF);

        /* Read PMC status and wait for falling into L2 link state */
        ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
                                 value & PCIE_APP_PMC_IN_L2, 20,
                                 jiffies_to_usecs(5 * HZ));
        if (ret)
                dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");

        return ret;
}

static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
{
        if (dw_pcie_link_up(&lpp->pci))
                intel_pcie_wait_l2(lpp);

        /* Put endpoint device in reset state */
        intel_pcie_device_rst_assert(lpp);
        pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}

static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
        struct device *dev = lpp->pci.dev;
        int ret;

        intel_pcie_core_rst_assert(lpp);
        intel_pcie_device_rst_assert(lpp);

        ret = phy_init(lpp->phy);
        if (ret)
                return ret;

        intel_pcie_core_rst_deassert(lpp);

        ret = clk_prepare_enable(lpp->core_clk);
        if (ret) {
                dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
                goto clk_err;
        }

        if (!lpp->pcie_cap_ofst) {
                ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
                if (!ret) {
                        ret = -ENXIO;
                        dev_err(dev, "Invalid PCIe capability offset\n");
                        goto app_init_err;
                }

                lpp->pcie_cap_ofst = ret;
        }

        intel_pcie_rc_setup(lpp);
        ret = intel_pcie_app_logic_setup(lpp);
        if (ret)
                goto app_init_err;

        /* Enable integrated interrupts */
        pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
                         PCIE_APP_IRN_INT);

        return 0;

app_init_err:
        clk_disable_unprepare(lpp->core_clk);
clk_err:
        intel_pcie_core_rst_assert(lpp);
        intel_pcie_deinit_phy(lpp);

        return ret;
}

static void __intel_pcie_remove(struct intel_pcie_port *lpp)
{
        intel_pcie_core_irq_disable(lpp);
        intel_pcie_turn_off(lpp);
        clk_disable_unprepare(lpp->core_clk);
        intel_pcie_core_rst_assert(lpp);
        intel_pcie_deinit_phy(lpp);
}

static int intel_pcie_remove(struct platform_device *pdev)
{
        struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
        struct pcie_port *pp = &lpp->pci.pp;

        dw_pcie_host_deinit(pp);
        __intel_pcie_remove(lpp);

        return 0;
}

static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
        struct intel_pcie_port *lpp = dev_get_drvdata(dev);
        int ret;

        intel_pcie_core_irq_disable(lpp);
        ret = intel_pcie_wait_l2(lpp);
        if (ret)
                return ret;

        intel_pcie_deinit_phy(lpp);
        clk_disable_unprepare(lpp->core_clk);
        return ret;
}

static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
        struct intel_pcie_port *lpp = dev_get_drvdata(dev);

        return intel_pcie_host_setup(lpp);
}

static int intel_pcie_rc_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);

        return intel_pcie_host_setup(lpp);
}

/*
 * Dummy function so that DW core doesn't configure MSI
 */
static int intel_pcie_msi_init(struct pcie_port *pp)
{
        return 0;
}

u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
        return cpu_addr + BUS_IATU_OFFSET;
}

static const struct dw_pcie_ops intel_pcie_ops = {
        .cpu_addr_fixup = intel_pcie_cpu_addr,
};

static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
        .host_init =            intel_pcie_rc_init,
        .msi_host_init =        intel_pcie_msi_init,
};

static const struct intel_pcie_soc pcie_data = {
        .pcie_ver =             0x520A,
        .pcie_atu_offset =      0xC0000,
        .num_viewport =         3,
};

static int intel_pcie_probe(struct platform_device *pdev)
{
        const struct intel_pcie_soc *data;
        struct device *dev = &pdev->dev;
        struct intel_pcie_port *lpp;
        struct pcie_port *pp;
        struct dw_pcie *pci;
        int ret;

        lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
        if (!lpp)
                return -ENOMEM;

        platform_set_drvdata(pdev, lpp);
        pci = &lpp->pci;
        pci->dev = dev;
        pp = &pci->pp;

        ret = intel_pcie_get_resources(pdev);
        if (ret)
                return ret;

        ret = intel_pcie_ep_rst_init(lpp);
        if (ret)
                return ret;

        data = device_get_match_data(dev);
        if (!data)
                return -ENODEV;

        pci->ops = &intel_pcie_ops;
        pci->version = data->pcie_ver;
        pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
        pp->ops = &intel_pcie_dw_ops;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(dev, "Cannot initialize host\n");
                return ret;
        }

        /*
         * Intel PCIe doesn't configure IO region, so set viewport
         * to not perform IO region access.
         */
        pci->num_viewport = data->num_viewport;

        return 0;
}

static const struct dev_pm_ops intel_pcie_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
                                      intel_pcie_resume_noirq)
};

static const struct of_device_id of_intel_pcie_match[] = {
        { .compatible = "intel,lgm-pcie", .data = &pcie_data },
        {}
};

static struct platform_driver intel_pcie_driver = {
        .probe = intel_pcie_probe,
        .remove = intel_pcie_remove,
        .driver = {
                .name = "intel-gw-pcie",
                .of_match_table = of_intel_pcie_match,
                .pm = &intel_pcie_pm_ops,
        },
};
builtin_platform_driver(intel_pcie_driver);
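A hypothetical sketch of how a second SoC variant would plug into the match-data scheme used by pcie_data and of_intel_pcie_match above; the compatible string and values below are made up for illustration only:

/*
 * Hypothetical only: another variant would supply its own match data
 * and an extra of_device_id entry. The values here simply mirror
 * pcie_data; a real variant would use its own offsets.
 */
static const struct intel_pcie_soc example_pcie_data = {
        .pcie_ver =             0x520A,
        .pcie_atu_offset =      0xC0000,
        .num_viewport =         3,
};

static const struct of_device_id example_of_match[] = {
        { .compatible = "intel,example-pcie", .data = &example_pcie_data },
        {}
};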
@@ -54,6 +54,7 @@
#define PCIE20_PARF_LTSSM               0x1B0
#define PCIE20_PARF_SID_OFFSET          0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG   0x24C
#define PCIE20_PARF_DEVICE_TYPE         0x1000

#define PCIE20_ELBI_SYS_CTRL            0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE  BIT(0)

@@ -80,6 +81,8 @@
#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE      0x358
#define SLV_ADDR_SPACE_SZ               0x10000000

#define DEVICE_TYPE_RC                  0x4

#define QCOM_PCIE_2_1_0_MAX_SUPPLY      3
struct qcom_pcie_resources_2_1_0 {
        struct clk *iface_clk;

@@ -139,12 +142,20 @@ struct qcom_pcie_resources_2_3_3 {
        struct reset_control *rst[7];
};

struct qcom_pcie_resources_2_7_0 {
        struct clk_bulk_data clks[6];
        struct regulator_bulk_data supplies[2];
        struct reset_control *pci_reset;
        struct clk *pipe_clk;
};

union qcom_pcie_resources {
        struct qcom_pcie_resources_1_0_0 v1_0_0;
        struct qcom_pcie_resources_2_1_0 v2_1_0;
        struct qcom_pcie_resources_2_3_2 v2_3_2;
        struct qcom_pcie_resources_2_3_3 v2_3_3;
        struct qcom_pcie_resources_2_4_0 v2_4_0;
        struct qcom_pcie_resources_2_7_0 v2_7_0;
};

struct qcom_pcie;

@@ -1068,6 +1079,134 @@ err_clk_iface:
        return ret;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
        struct dw_pcie *pci = pcie->pci;
        struct device *dev = pci->dev;
        int ret;

        res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
        if (IS_ERR(res->pci_reset))
                return PTR_ERR(res->pci_reset);

        res->supplies[0].supply = "vdda";
        res->supplies[1].supply = "vddpe-3v3";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
                                      res->supplies);
        if (ret)
                return ret;

        res->clks[0].id = "aux";
        res->clks[1].id = "cfg";
        res->clks[2].id = "bus_master";
        res->clks[3].id = "bus_slave";
        res->clks[4].id = "slave_q2a";
        res->clks[5].id = "tbu";

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
        if (ret < 0)
                return ret;

        res->pipe_clk = devm_clk_get(dev, "pipe");
        return PTR_ERR_OR_ZERO(res->pipe_clk);
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
        struct dw_pcie *pci = pcie->pci;
        struct device *dev = pci->dev;
        u32 val;
        int ret;

        ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
        if (ret < 0) {
                dev_err(dev, "cannot enable regulators\n");
                return ret;
        }

        ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
        if (ret < 0)
                goto err_disable_regulators;

        ret = reset_control_assert(res->pci_reset);
        if (ret < 0) {
                dev_err(dev, "cannot deassert pci reset\n");
                goto err_disable_clocks;
        }

        usleep_range(1000, 1500);

        ret = reset_control_deassert(res->pci_reset);
        if (ret < 0) {
                dev_err(dev, "cannot deassert pci reset\n");
                goto err_disable_clocks;
        }

        ret = clk_prepare_enable(res->pipe_clk);
        if (ret) {
                dev_err(dev, "cannot prepare/enable pipe clock\n");
                goto err_disable_clocks;
        }

        /* configure PCIe to RC mode */
        writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);

        /* enable PCIe clocks and resets */
        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
        val &= ~BIT(0);
        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

        /* change DBI base address */
        writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

        /* MAC PHY_POWERDOWN MUX DISABLE */
        val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
        val &= ~BIT(29);
        writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

        val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
        val |= BIT(4);
        writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
                val |= BIT(31);
                writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
        }

        return 0;
err_disable_clocks:
        clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_disable_regulators:
        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

        return ret;
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

        clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

        return clk_prepare_enable(res->pipe_clk);
}

static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
{
        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

        clk_disable_unprepare(res->pipe_clk);
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
        u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

@@ -1167,6 +1306,16 @@ static const struct qcom_pcie_ops ops_2_3_3 = {
        .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
        .get_resources = qcom_pcie_get_resources_2_7_0,
        .init = qcom_pcie_init_2_7_0,
        .deinit = qcom_pcie_deinit_2_7_0,
        .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
        .post_init = qcom_pcie_post_init_2_7_0,
        .post_deinit = qcom_pcie_post_deinit_2_7_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
        .link_up = qcom_pcie_link_up,
};

@@ -1282,6 +1431,7 @@ static const struct of_device_id qcom_pcie_match[] = {
        { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
        { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
        { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
        { .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
        { }
};
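The new "qcom,pcie-sdm845" entry selects ops_2_7_0 through the of_device_id .data pointer. A minimal sketch of how a probe path typically resolves such match data (the surrounding function is illustrative and not part of this diff; only ops_2_7_0 and qcom_pcie_match come from it):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/*
 * Illustrative only: look up the per-SoC ops table registered in
 * qcom_pcie_match for the compatible string that matched.
 */
static const struct qcom_pcie_ops *example_get_ops(struct platform_device *pdev)
{
        return of_device_get_match_data(&pdev->dev);
}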
@@ -9,11 +9,11 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>

@@ -171,12 +171,6 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
        writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
{
        writel(0, priv->base + PCL_RCV_INT);
        writel(0, priv->base + PCL_RCV_INTX);
}

static void uniphier_pcie_irq_ack(struct irq_data *d)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(d);

@@ -397,14 +391,6 @@ out_clk_disable:
        return ret;
}

static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
{
        uniphier_pcie_irq_disable(priv);
        phy_exit(priv->phy);
        reset_control_assert(priv->rst);
        clk_disable_unprepare(priv->clk);
}

static const struct dw_pcie_ops dw_pcie_ops = {
        .start_link = uniphier_pcie_establish_link,
        .stop_link = uniphier_pcie_stop_link,

@@ -456,31 +442,16 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
        return uniphier_add_pcie_port(priv, pdev);
}

static int uniphier_pcie_remove(struct platform_device *pdev)
{
        struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);

        uniphier_pcie_host_disable(priv);

        return 0;
}

static const struct of_device_id uniphier_pcie_match[] = {
        { .compatible = "socionext,uniphier-pcie", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_pcie_match);

static struct platform_driver uniphier_pcie_driver = {
        .probe = uniphier_pcie_probe,
        .remove = uniphier_pcie_remove,
        .driver = {
                .name = "uniphier-pcie",
                .of_match_table = uniphier_pcie_match,
        },
};
builtin_platform_driver(uniphier_pcie_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
MODULE_LICENSE("GPL v2");