
// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <[email protected]>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"

#define PCIE_SETTING_REG                0x80
#define PCIE_PCI_IDS_1                  0x9c
#define PCI_CLASS(class)                (class << 8)
#define PCIE_RC_MODE                    BIT(0)

#define PCIE_CFGNUM_REG                 0x140
#define PCIE_CFG_DEVFN(devfn)           ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)               (((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)         (((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN          BIT(20)
#define PCIE_CFG_OFFSET_ADDR            0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
        (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG               0x148
#define PCIE_MAC_RSTB                   BIT(0)
#define PCIE_PHY_RSTB                   BIT(1)
#define PCIE_BRG_RSTB                   BIT(2)
#define PCIE_PE_RSTB                    BIT(3)

#define PCIE_LTSSM_STATUS_REG           0x150
#define PCIE_LTSSM_STATE_MASK           GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)           ((val & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE        0x14

#define PCIE_LINK_STATUS_REG            0x154
#define PCIE_PORT_LINKUP                BIT(8)

#define PCIE_MSI_SET_NUM                8
#define PCIE_MSI_IRQS_PER_SET           32
#define PCIE_MSI_IRQS_NUM \
        (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG             0x180
#define PCIE_MSI_ENABLE                 GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT                  8
#define PCIE_INTX_SHIFT                 24
#define PCIE_INTX_ENABLE \
        GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG             0x184
#define PCIE_MSI_SET_ENABLE_REG         0x190
#define PCIE_MSI_SET_ENABLE             GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG           0xc00
#define PCIE_MSI_SET_OFFSET             0x10
#define PCIE_MSI_SET_STATUS_OFFSET      0x04
#define PCIE_MSI_SET_ENABLE_OFFSET      0x08

#define PCIE_MSI_SET_ADDR_HI_BASE       0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET     0x04

#define PCIE_ICMD_PM_REG                0x198
#define PCIE_TURN_OFF_LINK              BIT(4)

#define PCIE_MISC_CTRL_REG              0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ     BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG       0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET    0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET   0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET   0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET      0x10
#define PCIE_ATR_TLB_SET_OFFSET         0x20

#define PCIE_MAX_TRANS_TABLES           8
#define PCIE_ATR_EN                     BIT(0)
#define PCIE_ATR_SIZE(size) \
        (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)                 ((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM               PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO                PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)         (((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM           PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO            PCIE_ATR_TLP_TYPE(2)

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
        void __iomem *base;
        phys_addr_t msg_addr;
        u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_gen3_pcie {
        struct device *dev;
        void __iomem *base;
        phys_addr_t reg_base;
        struct reset_control *mac_reset;
        struct reset_control *phy_reset;
        struct phy *phy;
        struct clk_bulk_data *clks;
        int num_clks;

        int irq;
        u32 saved_irq_state;
        raw_spinlock_t irq_lock;
        struct irq_domain *intx_domain;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_bottom_domain;
        struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
        struct mutex lock;
        DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};

/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
static const char *const ltssm_str[] = {
        "detect.quiet",                 /* 0x00 */
        "detect.active",                /* 0x01 */
        "polling.active",               /* 0x02 */
        "polling.compliance",           /* 0x03 */
        "polling.configuration",        /* 0x04 */
        "config.linkwidthstart",        /* 0x05 */
        "config.linkwidthaccept",       /* 0x06 */
        "config.lanenumwait",           /* 0x07 */
        "config.lanenumaccept",         /* 0x08 */
        "config.complete",              /* 0x09 */
        "config.idle",                  /* 0x0A */
        "recovery.receiverlock",        /* 0x0B */
        "recovery.equalization",        /* 0x0C */
        "recovery.speed",               /* 0x0D */
        "recovery.receiverconfig",      /* 0x0E */
        "recovery.idle",                /* 0x0F */
        "L0",                           /* 0x10 */
        "L0s",                          /* 0x11 */
        "L1.entry",                     /* 0x12 */
        "L1.idle",                      /* 0x13 */
        "L2.idle",                      /* 0x14 */
        "L2.transmitwake",              /* 0x15 */
        "disable",                      /* 0x16 */
        "loopback.entry",               /* 0x17 */
        "loopback.active",              /* 0x18 */
        "loopback.exit",                /* 0x19 */
        "hotreset",                     /* 0x1A */
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
                                       int where, int size)
{
        struct mtk_gen3_pcie *pcie = bus->sysdata;
        int bytes;
        u32 val;

        bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

        val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
              PCIE_CFG_HEADER(bus->number, devfn);

        writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
                                      int where)
{
        struct mtk_gen3_pcie *pcie = bus->sysdata;

        return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

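/*
 * Configuration accesses go through a dedicated window at offset
 * PCIE_CFG_OFFSET_ADDR; the target bus/devfn and the byte enables are
 * programmed into PCIE_CFGNUM_REG (mtk_pcie_config_tlp_header()) right
 * before each generic 32-bit access.
 */
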
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 *val)
{
        mtk_pcie_config_tlp_header(bus, devfn, where, size);

        return pci_generic_config_read32(bus, devfn, where, size, val);
}

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        mtk_pcie_config_tlp_header(bus, devfn, where, size);

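        /*
         * The hardware only performs 32-bit config writes: shift sub-word
         * data to its byte lane and let the byte enables programmed above
         * select which lanes are actually written.
         */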
        if (size <= 2)
                val <<= (where & 0x3) * 8;

        return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
        .map_bus = mtk_pcie_map_bus,
        .read  = mtk_pcie_config_read,
        .write = mtk_pcie_config_write,
};

static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
                                    resource_size_t cpu_addr,
                                    resource_size_t pci_addr,
                                    resource_size_t size,
                                    unsigned long type, int num)
{
        void __iomem *table;
        u32 val;

        if (num >= PCIE_MAX_TRANS_TABLES) {
                dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
                        (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
                return -ENODEV;
        }

        table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
                num * PCIE_ATR_TLB_SET_OFFSET;

        writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
                       table);
        writel_relaxed(upper_32_bits(cpu_addr),
                       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
        writel_relaxed(lower_32_bits(pci_addr),
                       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
        writel_relaxed(upper_32_bits(pci_addr),
                       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

        if (type == IORESOURCE_IO)
                val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
        else
                val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

        writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

        return 0;
}

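/*
 * Each MSI set captures inbound memory writes targeting the physical
 * address of its own register block (msg_addr); the message data selects
 * one of the 32 vectors in the set, for 8 * 32 = 256 vectors in total.
 */
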
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
        int i;
        u32 val;

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
                                i * PCIE_MSI_SET_OFFSET;
                msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
                                    i * PCIE_MSI_SET_OFFSET;

                /* Configure the MSI capture address */
                writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
                writel_relaxed(upper_32_bits(msi_set->msg_addr),
                               pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
                               i * PCIE_MSI_SET_ADDR_HI_OFFSET);
        }

        val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
        val |= PCIE_MSI_SET_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val |= PCIE_MSI_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
        struct resource_entry *entry;
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
        unsigned int table_index = 0;
        int err;
        u32 val;

        /* Set as RC mode */
        val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
        val |= PCIE_RC_MODE;
        writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

        /* Set class code */
        val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
        val &= ~GENMASK(31, 8);
        val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
        writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

        /* Mask all INTx interrupts */
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val &= ~PCIE_INTX_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

        /* Disable DVFSRC voltage request */
        val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
        val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
        writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

        /* Assert all reset signals */
        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
        val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        /*
         * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
         * and 2.2.1 (Initial Power-Up (G3 to S0)).
         * The deassertion of PERST# should be delayed 100ms (TPVPERL)
         * for the power and clock to become stable.
         */
        msleep(100);

        /* De-assert reset signals */
        val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        /* Check if the link is up or not */
        err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
                                 !!(val & PCIE_PORT_LINKUP), 20,
                                 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
        if (err) {
                const char *ltssm_state;
                int ltssm_index;

                val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
                ltssm_index = PCIE_LTSSM_STATE(val);
                ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
                              "Unknown state" : ltssm_str[ltssm_index];
                dev_err(pcie->dev,
                        "PCIe link down, current LTSSM state: %s (%#x)\n",
                        ltssm_state, val);
                return err;
        }

        mtk_pcie_enable_msi(pcie);

        /* Set PCIe translation windows */
        resource_list_for_each_entry(entry, &host->windows) {
                struct resource *res = entry->res;
                unsigned long type = resource_type(res);
                resource_size_t cpu_addr;
                resource_size_t pci_addr;
                resource_size_t size;
                const char *range_type;

                if (type == IORESOURCE_IO) {
                        cpu_addr = pci_pio_to_address(res->start);
                        range_type = "IO";
                } else if (type == IORESOURCE_MEM) {
                        cpu_addr = res->start;
                        range_type = "MEM";
                } else {
                        continue;
                }

                pci_addr = res->start - entry->offset;
                size = resource_size(res);
                err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
                                               type, table_index);
                if (err)
                        return err;

                dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
                        range_type, table_index, (unsigned long long)cpu_addr,
                        (unsigned long long)pci_addr, (unsigned long long)size);

                table_index++;
        }

        return 0;
}

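/*
 * All INTx and MSI vectors funnel through a single chained parent
 * interrupt, so per-vector CPU affinity cannot be supported here.
 */
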
static int mtk_pcie_set_affinity(struct irq_data *data,
                                 const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
        pci_msi_mask_irq(data);
        irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
        pci_msi_unmask_irq(data);
        irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
        .irq_ack = irq_chip_ack_parent,
        .irq_mask = mtk_pcie_msi_irq_mask,
        .irq_unmask = mtk_pcie_msi_irq_unmask,
        .name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip = &mtk_msi_irq_chip,
};

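/*
 * MSIs are modeled as a two-level hierarchy: the generic PCI/MSI domain
 * on top forwards ack/mask/unmask to its parent, the bottom domain below,
 * which owns the per-set enable and status registers.
 */
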
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        msg->address_hi = upper_32_bits(msi_set->msg_addr);
        msg->address_lo = lower_32_bits(msi_set->msg_addr);
        msg->data = hwirq;
        dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
                hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        unsigned long hwirq;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq, flags;
        u32 val;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        val &= ~BIT(hwirq);
        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq, flags;
        u32 val;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        val |= BIT(hwirq);
        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
        .irq_ack = mtk_msi_bottom_irq_ack,
        .irq_mask = mtk_msi_bottom_irq_mask,
        .irq_unmask = mtk_msi_bottom_irq_unmask,
        .irq_compose_msi_msg = mtk_compose_msi_msg,
        .irq_set_affinity = mtk_pcie_set_affinity,
        .name = "MSI",
};

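/*
 * Multi-MSI requires a naturally aligned, power-of-two block of vectors,
 * hence bitmap_find_free_region() with order_base_2(nr_irqs) below.
 */
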
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq, unsigned int nr_irqs,
                                       void *arg)
{
        struct mtk_gen3_pcie *pcie = domain->host_data;
        struct mtk_msi_set *msi_set;
        int i, hwirq, set_idx;

        mutex_lock(&pcie->lock);

        hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
                                        order_base_2(nr_irqs));

        mutex_unlock(&pcie->lock);

        if (hwirq < 0)
                return -ENOSPC;

        set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
        msi_set = &pcie->msi_sets[set_idx];

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &mtk_msi_bottom_irq_chip, msi_set,
                                    handle_edge_irq, NULL, NULL);

        return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
                                       unsigned int virq, unsigned int nr_irqs)
{
        struct mtk_gen3_pcie *pcie = domain->host_data;
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);

        mutex_lock(&pcie->lock);

        bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
                              order_base_2(nr_irqs));

        mutex_unlock(&pcie->lock);

        irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
        .alloc = mtk_msi_bottom_domain_alloc,
        .free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared when the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long hwirq;

        hwirq = data->hwirq + PCIE_INTX_SHIFT;
        writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
        .irq_mask = mtk_intx_mask,
        .irq_unmask = mtk_intx_unmask,
        .irq_eoi = mtk_intx_eoi,
        .irq_set_affinity = mtk_pcie_set_affinity,
        .name = "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                             irq_hw_number_t hwirq)
{
        irq_set_chip_data(irq, domain->host_data);
        irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
                                      handle_fasteoi_irq, "INTx");
        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct device_node *intc_node, *node = dev->of_node;
        int ret;

        raw_spin_lock_init(&pcie->irq_lock);

        /* Setup INTx */
        intc_node = of_get_child_by_name(node, "interrupt-controller");
        if (!intc_node) {
                dev_err(dev, "missing interrupt-controller node\n");
                return -ENODEV;
        }

        pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
                                                  &intx_domain_ops, pcie);
        if (!pcie->intx_domain) {
                dev_err(dev, "failed to create INTx IRQ domain\n");
                ret = -ENODEV;
                goto out_put_node;
        }

        /* Setup MSI */
        mutex_init(&pcie->lock);

        pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
                                                        &mtk_msi_bottom_domain_ops, pcie);
        if (!pcie->msi_bottom_domain) {
                dev_err(dev, "failed to create MSI bottom domain\n");
                ret = -ENODEV;
                goto err_msi_bottom_domain;
        }

        pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
                                                     &mtk_msi_domain_info,
                                                     pcie->msi_bottom_domain);
        if (!pcie->msi_domain) {
                dev_err(dev, "failed to create MSI domain\n");
                ret = -ENODEV;
                goto err_msi_domain;
        }

        of_node_put(intc_node);
        return 0;

err_msi_domain:
        irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
        irq_domain_remove(pcie->intx_domain);
out_put_node:
        of_node_put(intc_node);
        return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
        irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

        if (pcie->intx_domain)
                irq_domain_remove(pcie->intx_domain);

        if (pcie->msi_domain)
                irq_domain_remove(pcie->msi_domain);

        if (pcie->msi_bottom_domain)
                irq_domain_remove(pcie->msi_bottom_domain);

        irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
        struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
        unsigned long msi_enable, msi_status;
        irq_hw_number_t bit, hwirq;

        msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

        do {
                msi_status = readl_relaxed(msi_set->base +
                                           PCIE_MSI_SET_STATUS_OFFSET);
                msi_status &= msi_enable;
                if (!msi_status)
                        break;

                for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
                        hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
                        generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
                }
        } while (true);
}

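/*
 * PCIE_INT_STATUS_REG layout: one bit per MSI set starting at bit
 * PCIE_MSI_SHIFT (8) and one bit per INTx line starting at bit
 * PCIE_INTX_SHIFT (24); the chained handler below scans both ranges.
 */
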
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
        struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
        struct irq_chip *irqchip = irq_desc_get_chip(desc);
        unsigned long status;
        irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

        chained_irq_enter(irqchip, desc);

        status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
        for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
                              PCIE_INTX_SHIFT)
                generic_handle_domain_irq(pcie->intx_domain,
                                          irq_bit - PCIE_INTX_SHIFT);

        irq_bit = PCIE_MSI_SHIFT;
        for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
                              PCIE_MSI_SHIFT) {
                mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

                writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
        }

        chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int err;

        err = mtk_pcie_init_irq_domains(pcie);
        if (err)
                return err;

        pcie->irq = platform_get_irq(pdev, 0);
        if (pcie->irq < 0)
                return pcie->irq;

        irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

        return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *regs;
        int ret;

        regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
        if (!regs)
                return -EINVAL;
        pcie->base = devm_ioremap_resource(dev, regs);
        if (IS_ERR(pcie->base)) {
                dev_err(dev, "failed to map register base\n");
                return PTR_ERR(pcie->base);
        }

        pcie->reg_base = regs->start;

        pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
        if (IS_ERR(pcie->phy_reset)) {
                ret = PTR_ERR(pcie->phy_reset);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get PHY reset\n");

                return ret;
        }

        pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
        if (IS_ERR(pcie->mac_reset)) {
                ret = PTR_ERR(pcie->mac_reset);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get MAC reset\n");

                return ret;
        }

        pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
        if (IS_ERR(pcie->phy)) {
                ret = PTR_ERR(pcie->phy);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get PHY\n");

                return ret;
        }

        pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
        if (pcie->num_clks < 0) {
                dev_err(dev, "failed to get clocks\n");
                return pcie->num_clks;
        }

        return 0;
}

static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        int err;

        /* PHY power on and enable pipe clock */
        reset_control_deassert(pcie->phy_reset);

        err = phy_init(pcie->phy);
        if (err) {
                dev_err(dev, "failed to initialize PHY\n");
                goto err_phy_init;
        }

        err = phy_power_on(pcie->phy);
        if (err) {
                dev_err(dev, "failed to power on PHY\n");
                goto err_phy_on;
        }

        /* MAC power on and enable transaction layer clocks */
        reset_control_deassert(pcie->mac_reset);

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);

        err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
        if (err) {
                dev_err(dev, "failed to enable clocks\n");
                goto err_clk_init;
        }

        return 0;

err_clk_init:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
        reset_control_assert(pcie->mac_reset);
        phy_power_off(pcie->phy);
err_phy_on:
        phy_exit(pcie->phy);
err_phy_init:
        reset_control_assert(pcie->phy_reset);

        return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
        clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

        pm_runtime_put_sync(pcie->dev);
        pm_runtime_disable(pcie->dev);
        reset_control_assert(pcie->mac_reset);

        phy_power_off(pcie->phy);
        phy_exit(pcie->phy);
        reset_control_assert(pcie->phy_reset);
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
        int err;

        err = mtk_pcie_parse_port(pcie);
        if (err)
                return err;

        /*
         * The controller may have been left out of reset by the bootloader
         * so make sure that we get a clean start by asserting resets here.
         */
        reset_control_assert(pcie->phy_reset);
        reset_control_assert(pcie->mac_reset);
        usleep_range(10, 20);

        /* Don't touch the hardware registers before power up */
        err = mtk_pcie_power_up(pcie);
        if (err)
                return err;

        /* Try link up */
        err = mtk_pcie_startup_port(pcie);
        if (err)
                goto err_setup;

        err = mtk_pcie_setup_irq(pcie);
        if (err)
                goto err_setup;

        return 0;

err_setup:
        mtk_pcie_power_down(pcie);

        return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct mtk_gen3_pcie *pcie;
        struct pci_host_bridge *host;
        int err;

        host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
        if (!host)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(host);

        pcie->dev = dev;
        platform_set_drvdata(pdev, pcie);

        err = mtk_pcie_setup(pcie);
        if (err)
                return err;

        host->ops = &mtk_pcie_ops;
        host->sysdata = pcie;

        err = pci_host_probe(host);
        if (err) {
                mtk_pcie_irq_teardown(pcie);
                mtk_pcie_power_down(pcie);
                return err;
        }

        return 0;
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
        struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

        pci_lock_rescan_remove();
        pci_stop_root_bus(host->bus);
        pci_remove_root_bus(host->bus);
        pci_unlock_rescan_remove();

        mtk_pcie_irq_teardown(pcie);
        mtk_pcie_power_down(pcie);

        return 0;
}

static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
        int i;

        raw_spin_lock(&pcie->irq_lock);

        pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                msi_set->saved_irq_state = readl_relaxed(msi_set->base +
                                           PCIE_MSI_SET_ENABLE_OFFSET);
        }

        raw_spin_unlock(&pcie->irq_lock);
}

static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
        int i;

        raw_spin_lock(&pcie->irq_lock);

        writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                writel_relaxed(msi_set->saved_irq_state,
                               msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        }

        raw_spin_unlock(&pcie->irq_lock);
}

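/*
 * Setting PCIE_TURN_OFF_LINK asks the controller to bring the link down
 * into the L2 power state; completion is confirmed by polling the LTSSM
 * until it reports L2 idle.
 */
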
static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
        u32 val;

        val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
        val |= PCIE_TURN_OFF_LINK;
        writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

        /* Check the link is L2 */
        return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
                                  (PCIE_LTSSM_STATE(val) ==
                                   PCIE_LTSSM_STATE_L2_IDLE), 20,
                                  50 * USEC_PER_MSEC);
}

static int mtk_pcie_suspend_noirq(struct device *dev)
{
        struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
        int err;
        u32 val;

        /* Trigger link to L2 state */
        err = mtk_pcie_turn_off_link(pcie);
        if (err) {
                dev_err(pcie->dev, "cannot enter L2 state\n");
                return err;
        }

        /* Pull down the PERST# pin */
        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
        val |= PCIE_PE_RSTB;
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        dev_dbg(pcie->dev, "entered L2 state successfully\n");

        mtk_pcie_irq_save(pcie);
        mtk_pcie_power_down(pcie);

        return 0;
}

static int mtk_pcie_resume_noirq(struct device *dev)
{
        struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
        int err;

        err = mtk_pcie_power_up(pcie);
        if (err)
                return err;

        err = mtk_pcie_startup_port(pcie);
        if (err) {
                mtk_pcie_power_down(pcie);
                return err;
        }

        mtk_pcie_irq_restore(pcie);

        return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
        NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
                                  mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
        { .compatible = "mediatek,mt8192-pcie" },
        {},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
        .probe = mtk_pcie_probe,
        .remove = mtk_pcie_remove,
        .driver = {
                .name = "mtk-pcie-gen3",
                .of_match_table = mtk_pcie_of_match,
                .pm = &mtk_pcie_pm_ops,
        },
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");