pcie-mediatek.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MediaTek PCIe host controller driver.
  4. *
  5. * Copyright (c) 2017 MediaTek Inc.
  6. * Author: Ryder Lee <[email protected]>
  7. * Honghui Zhang <[email protected]>
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/iopoll.h>
  12. #include <linux/irq.h>
  13. #include <linux/irqchip/chained_irq.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/kernel.h>
  16. #include <linux/mfd/syscon.h>
  17. #include <linux/msi.h>
  18. #include <linux/module.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of_pci.h>
  21. #include <linux/of_platform.h>
  22. #include <linux/pci.h>
  23. #include <linux/phy/phy.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/pm_runtime.h>
  26. #include <linux/regmap.h>
  27. #include <linux/reset.h>
  28. #include "../pci.h"
/* PCIe shared registers (host-controller wide, accessed via pcie->base) */
#define PCIE_SYS_CFG		0x00
#define PCIE_INT_ENABLE		0x0c
#define PCIE_CFG_ADDR		0x20
#define PCIE_CFG_DATA		0x24

/* PCIe per port registers (accessed via port->base) */
#define PCIE_BAR0_SETUP		0x10
#define PCIE_CLASS		0x34
#define PCIE_LINK_STATUS	0x50

#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
#define PCIE_PORT_PERST(x)	BIT(1 + (x))
#define PCIE_PORT_LINKUP	BIT(0)
#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)

#define PCIE_BAR_ENABLE		BIT(0)
#define PCIE_REVISION_ID	BIT(0)
#define PCIE_CLASS_CODE		(0x60400 << 8)
/* Indirect config-cycle address: register[7:2] | extended reg[11:8] at [27:24] */
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
  53. /* MediaTek specific configuration registers */
  54. #define PCIE_FTS_NUM 0x70c
  55. #define PCIE_FTS_NUM_MASK GENMASK(15, 8)
  56. #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
  57. #define PCIE_FC_CREDIT 0x73c
  58. #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
  59. #define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
/* PCIe V2 share registers */
#define PCIE_SYS_CFG_V2		0x0
#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)

/* PCIe V2 per-port registers */
#define PCIE_MSI_VECTOR		0x0c0

#define PCIE_CONF_VEND_ID	0x100
#define PCIE_CONF_DEVICE_ID	0x102
#define PCIE_CONF_CLASS_ID	0x106

#define PCIE_INT_MASK		0x420
#define INTX_MASK		GENMASK(19, 16)
#define INTX_SHIFT		16
#define PCIE_INT_STATUS		0x424
#define MSI_STATUS		BIT(23)
#define PCIE_IMSI_STATUS	0x42c
#define PCIE_IMSI_ADDR		0x430
#define MSI_MASK		BIT(23)
#define MTK_MSI_IRQS_NUM	32

#define PCIE_AHB_TRANS_BASE0_L	0x438
#define PCIE_AHB_TRANS_BASE0_H	0x43c
#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
#define PCIE_AXI_WINDOW0	0x448
#define WIN_ENABLE		BIT(7)
/*
 * Define PCIe to AHB window size as 2^33 to support max 8GB address space
 * translate, support least 4GB DRAM size access from EP DMA(physical DRAM
 * start from 0x40000000).
 */
#define PCIE2AHB_SIZE	0x21

/* PCIe V2 configuration transaction header */
#define PCIE_CFG_HEADER0	0x460
#define PCIE_CFG_HEADER1	0x464
#define PCIE_CFG_HEADER2	0x468
#define PCIE_CFG_WDATA		0x470
#define PCIE_APP_TLP_REQ	0x488
#define PCIE_CFG_RDATA		0x48c
#define APP_CFG_REQ		BIT(0)
#define APP_CPL_STATUS		GENMASK(7, 5)

#define CFG_WRRD_TYPE_0		4
#define CFG_WR_FMT		2
#define CFG_RD_FMT		0

/* TLP DW0/DW2 field encodings for the config request header */
#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
#define CFG_HEADER_DW0(type, fmt) \
	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
/* First/last byte-enables derived from access size and offset within a DW */
#define CFG_HEADER_DW1(where, size) \
	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
#define CFG_HEADER_DW2(regn, fun, dev, bus) \
	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
	 CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))

#define PCIE_RST_CTRL		0x510
#define PCIE_PHY_RSTB		BIT(0)
#define PCIE_PIPE_SRSTB		BIT(1)
#define PCIE_MAC_SRSTB		BIT(2)
#define PCIE_CRSTB		BIT(3)
#define PCIE_PERSTB		BIT(8)
#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
#define PCIE_LINK_STATUS_V2	0x804
#define PCIE_PORT_LINKUP_V2	BIT(10)

struct mtk_pcie_port;
/**
 * struct mtk_pcie_soc - differentiate between host generations
 * @need_fix_class_id: whether this host's class ID needed to be fixed or not
 * @need_fix_device_id: whether this host's device ID needed to be fixed or not
 * @no_msi: Bridge has no MSI support, and relies on an external block
 * @device_id: device ID which this host need to be fixed
 * @ops: pointer to configuration access functions
 * @startup: pointer to controller setting functions
 * @setup_irq: pointer to initialize IRQ functions
 */
struct mtk_pcie_soc {
	bool need_fix_class_id;
	bool need_fix_device_id;
	bool no_msi;
	unsigned int device_id;
	struct pci_ops *ops;
	int (*startup)(struct mtk_pcie_port *port);
	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
};
/**
 * struct mtk_pcie_port - PCIe port information
 * @base: IO mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @reset: pointer to port reset control
 * @sys_ck: pointer to transaction/data link layer clock
 * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
 *          and RC initiated MMIO access
 * @axi_ck: pointer to application layer MMIO channel operating clock
 * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
 *          when pcie_mac_ck/pcie_pipe_ck is turned off
 * @obff_ck: pointer to OBFF functional block operating clock
 * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
 * @phy: pointer to PHY control block
 * @slot: port slot
 * @irq: GIC irq
 * @irq_domain: legacy INTx IRQ domain
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @lock: protect the msi_irq_in_use bitmap
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mtk_pcie *pcie;
	struct reset_control *reset;
	struct clk *sys_ck;
	struct clk *ahb_ck;
	struct clk *axi_ck;
	struct clk *aux_ck;
	struct clk *obff_ck;
	struct clk *pipe_ck;
	struct phy *phy;
	u32 slot;
	int irq;
	struct irq_domain *irq_domain;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
/**
 * struct mtk_pcie - PCIe host information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @cfg: IO mapped register map for PCIe config
 * @free_ck: free-run reference clock
 * @ports: pointer to PCIe port information
 * @soc: pointer to SoC-dependent operations
 */
struct mtk_pcie {
	struct device *dev;
	void __iomem *base;
	struct regmap *cfg;
	struct clk *free_ck;

	struct list_head ports;
	const struct mtk_pcie_soc *soc;
};
/* Gate the shared reference clock and release the runtime PM reference. */
static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
{
	struct device *dev = pcie->dev;

	clk_disable_unprepare(pcie->free_ck);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
}
/*
 * Unmap a port's registers, drop it from the host's port list and free it.
 * Resources were devm-allocated, so release them through the devm API.
 */
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}
/*
 * Tear down every port in reverse order of bring-up: power off and exit the
 * PHY first, then gate the port clocks, then free the port itself; finally
 * power down the shared subsystem.
 */
static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		phy_power_off(port->phy);
		phy_exit(port->phy);
		clk_disable_unprepare(port->pipe_ck);
		clk_disable_unprepare(port->obff_ck);
		clk_disable_unprepare(port->axi_ck);
		clk_disable_unprepare(port->aux_ck);
		clk_disable_unprepare(port->ahb_ck);
		clk_disable_unprepare(port->sys_ck);
		mtk_pcie_port_free(port);
	}

	mtk_pcie_subsys_powerdown(pcie);
}
  236. static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
  237. {
  238. u32 val;
  239. int err;
  240. err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
  241. !(val & APP_CFG_REQ), 10,
  242. 100 * USEC_PER_MSEC);
  243. if (err)
  244. return PCIBIOS_SET_FAILED;
  245. if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
  246. return PCIBIOS_SET_FAILED;
  247. return PCIBIOS_SUCCESSFUL;
  248. }
  249. static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
  250. int where, int size, u32 *val)
  251. {
  252. u32 tmp;
  253. /* Write PCIe configuration transaction header for Cfgrd */
  254. writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
  255. port->base + PCIE_CFG_HEADER0);
  256. writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
  257. writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
  258. port->base + PCIE_CFG_HEADER2);
  259. /* Trigger h/w to transmit Cfgrd TLP */
  260. tmp = readl(port->base + PCIE_APP_TLP_REQ);
  261. tmp |= APP_CFG_REQ;
  262. writel(tmp, port->base + PCIE_APP_TLP_REQ);
  263. /* Check completion status */
  264. if (mtk_pcie_check_cfg_cpld(port))
  265. return PCIBIOS_SET_FAILED;
  266. /* Read cpld payload of Cfgrd */
  267. *val = readl(port->base + PCIE_CFG_RDATA);
  268. if (size == 1)
  269. *val = (*val >> (8 * (where & 3))) & 0xff;
  270. else if (size == 2)
  271. *val = (*val >> (8 * (where & 3))) & 0xffff;
  272. return PCIBIOS_SUCCESSFUL;
  273. }
  274. static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
  275. int where, int size, u32 val)
  276. {
  277. /* Write PCIe configuration transaction header for Cfgwr */
  278. writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
  279. port->base + PCIE_CFG_HEADER0);
  280. writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
  281. writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
  282. port->base + PCIE_CFG_HEADER2);
  283. /* Write Cfgwr data */
  284. val = val << 8 * (where & 3);
  285. writel(val, port->base + PCIE_CFG_WDATA);
  286. /* Trigger h/w to transmit Cfgwr TLP */
  287. val = readl(port->base + PCIE_APP_TLP_REQ);
  288. val |= APP_CFG_REQ;
  289. writel(val, port->base + PCIE_APP_TLP_REQ);
  290. /* Check completion status */
  291. return mtk_pcie_check_cfg_cpld(port);
  292. }
  293. static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
  294. unsigned int devfn)
  295. {
  296. struct mtk_pcie *pcie = bus->sysdata;
  297. struct mtk_pcie_port *port;
  298. struct pci_dev *dev = NULL;
  299. /*
  300. * Walk the bus hierarchy to get the devfn value
  301. * of the port in the root bus.
  302. */
  303. while (bus && bus->number) {
  304. dev = bus->self;
  305. bus = dev->bus;
  306. devfn = dev->devfn;
  307. }
  308. list_for_each_entry(port, &pcie->ports, list)
  309. if (port->slot == PCI_SLOT(devfn))
  310. return port;
  311. return NULL;
  312. }
  313. static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
  314. int where, int size, u32 *val)
  315. {
  316. struct mtk_pcie_port *port;
  317. u32 bn = bus->number;
  318. port = mtk_pcie_find_port(bus, devfn);
  319. if (!port)
  320. return PCIBIOS_DEVICE_NOT_FOUND;
  321. return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
  322. }
  323. static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
  324. int where, int size, u32 val)
  325. {
  326. struct mtk_pcie_port *port;
  327. u32 bn = bus->number;
  328. port = mtk_pcie_find_port(bus, devfn);
  329. if (!port)
  330. return PCIBIOS_DEVICE_NOT_FOUND;
  331. return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
  332. }
  333. static struct pci_ops mtk_pcie_ops_v2 = {
  334. .read = mtk_pcie_config_read,
  335. .write = mtk_pcie_config_write,
  336. };
/*
 * Build the MSI message for a vector: the capture address is the physical
 * address of the port's PCIE_MSI_VECTOR register, and the payload is the
 * h/w vector number.
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr;

	/* MT2712/MT7622 only support 32-bit MSI addresses */
	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	msg->address_hi = 0;
	msg->address_lo = lower_32_bits(addr);

	msg->data = data->hwirq;

	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}
/* MSI delivery target is fixed by the hardware; reject affinity changes. */
static int mtk_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
  354. static void mtk_msi_ack_irq(struct irq_data *data)
  355. {
  356. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
  357. u32 hwirq = data->hwirq;
  358. writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
  359. }
  360. static struct irq_chip mtk_msi_bottom_irq_chip = {
  361. .name = "MTK MSI",
  362. .irq_compose_msi_msg = mtk_compose_msi_msg,
  363. .irq_set_affinity = mtk_msi_set_affinity,
  364. .irq_ack = mtk_msi_ack_irq,
  365. };
  366. static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  367. unsigned int nr_irqs, void *args)
  368. {
  369. struct mtk_pcie_port *port = domain->host_data;
  370. unsigned long bit;
  371. WARN_ON(nr_irqs != 1);
  372. mutex_lock(&port->lock);
  373. bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
  374. if (bit >= MTK_MSI_IRQS_NUM) {
  375. mutex_unlock(&port->lock);
  376. return -ENOSPC;
  377. }
  378. __set_bit(bit, port->msi_irq_in_use);
  379. mutex_unlock(&port->lock);
  380. irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
  381. domain->host_data, handle_edge_irq,
  382. NULL, NULL);
  383. return 0;
  384. }
  385. static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
  386. unsigned int virq, unsigned int nr_irqs)
  387. {
  388. struct irq_data *d = irq_domain_get_irq_data(domain, virq);
  389. struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
  390. mutex_lock(&port->lock);
  391. if (!test_bit(d->hwirq, port->msi_irq_in_use))
  392. dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
  393. d->hwirq);
  394. else
  395. __clear_bit(d->hwirq, port->msi_irq_in_use);
  396. mutex_unlock(&port->lock);
  397. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  398. }
  399. static const struct irq_domain_ops msi_domain_ops = {
  400. .alloc = mtk_pcie_irq_domain_alloc,
  401. .free = mtk_pcie_irq_domain_free,
  402. };
/* Top-level MSI chip seen by the PCI core; acks propagate to the parent. */
static struct irq_chip mtk_msi_irq_chip = {
	.name		= "MTK PCIe MSI",
	.irq_ack	= irq_chip_ack_parent,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mtk_msi_irq_chip,
};
/*
 * Create the two-level MSI domain hierarchy for a port: an inner linear
 * domain mapping h/w vectors, plus a PCI MSI domain stacked on top of it.
 */
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);

	mutex_init(&port->lock);

	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
						      &msi_domain_ops, port);
	if (!port->inner_domain) {
		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
						     port->inner_domain);
	if (!port->msi_domain) {
		dev_err(port->pcie->dev, "failed to create MSI domain\n");
		/* Undo the inner domain on partial failure */
		irq_domain_remove(port->inner_domain);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Program the MSI capture address (the physical address of the port's
 * PCIE_MSI_VECTOR register) and unmask the aggregated MSI interrupt.
 */
static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
{
	u32 val;
	phys_addr_t msg_addr;

	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
	val = lower_32_bits(msg_addr);
	writel(val, port->base + PCIE_IMSI_ADDR);

	val = readl(port->base + PCIE_INT_MASK);
	val &= ~MSI_MASK;
	writel(val, port->base + PCIE_INT_MASK);
}
/*
 * Undo mtk_pcie_setup_irq() for every port: detach the chained handler
 * first, then remove the INTx and MSI domains, then release the mapping.
 */
static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
{
	struct mtk_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		irq_set_chained_handler_and_data(port->irq, NULL, NULL);

		if (port->irq_domain)
			irq_domain_remove(port->irq_domain);

		if (IS_ENABLED(CONFIG_PCI_MSI)) {
			if (port->msi_domain)
				irq_domain_remove(port->msi_domain);
			if (port->inner_domain)
				irq_domain_remove(port->inner_domain);
		}

		irq_dispose_mapping(port->irq);
	}
}
/*
 * Map one INTx line: there is no per-line h/w mask/ack control here, so
 * the dummy chip with a simple flow handler is sufficient.
 */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
/*
 * Create the port's IRQ domains: a linear INTx domain backed by the DT
 * interrupt-controller child node, plus (when MSI is enabled) the MSI
 * domain hierarchy.
 */
static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
				    struct device_node *node)
{
	struct device *dev = port->pcie->dev;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx: the interrupt controller is the port's first child */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "no PCIe Intc node found\n");
		return -ENODEV;
	}

	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						 &intx_domain_ops, port);
	of_node_put(pcie_intc_node);
	if (!port->irq_domain) {
		dev_err(dev, "failed to get INTx IRQ domain\n");
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		ret = mtk_pcie_allocate_msi_domains(port);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Chained handler for the port's GIC interrupt: demultiplex INTx lines
 * (status bits [19:16]) and, when enabled, the aggregated MSI source.
 */
static void mtk_pcie_intr_handler(struct irq_desc *desc)
{
	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	u32 bit = INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl(port->base + PCIE_INT_STATUS);
	if (status & INTX_MASK) {
		/* Scan only the INTx window, [INTX_SHIFT, INTX_SHIFT + 4) */
		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
			/* Clear the INTx */
			writel(1 << bit, port->base + PCIE_INT_STATUS);
			generic_handle_domain_irq(port->irq_domain,
						  bit - INTX_SHIFT);
		}
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (status & MSI_STATUS){
			unsigned long imsi_status;

			/*
			 * Re-read the per-vector status until it drains to
			 * zero so vectors raised during dispatch are not
			 * lost.
			 */
			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
					generic_handle_domain_irq(port->inner_domain, bit);
			}
			/* Clear MSI interrupt status */
			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
		}
	}

	chained_irq_exit(irqchip, desc);
}
/*
 * Set up the port's interrupt path: create the IRQ domains, look up the
 * port's GIC interrupt from the platform device, and install the chained
 * demultiplexing handler.
 */
static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
			      struct device_node *node)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domain(port, node);
	if (err) {
		dev_err(dev, "failed to init PCIe IRQ domain\n");
		return err;
	}

	/* Prefer a named IRQ when the DT provides interrupt-names */
	if (of_find_property(dev->of_node, "interrupt-names", NULL))
		port->irq = platform_get_irq_byname(pdev, "pcie_irq");
	else
		port->irq = platform_get_irq(pdev, port->slot);

	if (port->irq < 0)
		return port->irq;

	irq_set_chained_handler_and_data(port->irq,
					 mtk_pcie_intr_handler, port);

	return 0;
}
/*
 * Bring up a V2-generation port: enable LTSSM/ASPM, run the reset
 * sequence with the CEM-mandated PERST# delay, fix up IDs where the SoC
 * requires it, wait for link training, and program the address windows.
 * Returns 0 on link up, -EINVAL without a MEM window, -ETIMEDOUT when
 * training fails.
 */
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource *mem = NULL;
	struct resource_entry *entry;
	const struct mtk_pcie_soc *soc = port->pcie->soc;
	u32 val;
	int err;

	/* The first MEM window is the target of the AHB->PCIe translation */
	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
	if (entry)
		mem = entry->res;
	if (!mem)
		return -EINVAL;

	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
	if (pcie->base) {
		val = readl(pcie->base + PCIE_SYS_CFG_V2);
		val |= PCIE_CSR_LTSSM_EN(port->slot) |
		       PCIE_CSR_ASPM_L1_EN(port->slot);
		writel(val, pcie->base + PCIE_SYS_CFG_V2);
	} else if (pcie->cfg) {
		val = PCIE_CSR_LTSSM_EN(port->slot) |
		      PCIE_CSR_ASPM_L1_EN(port->slot);
		regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
	}

	/* Assert all reset signals */
	writel(0, port->base + PCIE_RST_CTRL);

	/*
	 * Enable PCIe link down reset, if link status changed from link up to
	 * link down, this will reset MAC control registers and configuration
	 * space.
	 */
	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
	 * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
	 * be delayed 100ms (TPVPERL) for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
	val = readl(port->base + PCIE_RST_CTRL);
	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
	       PCIE_MAC_SRSTB | PCIE_CRSTB;
	writel(val, port->base + PCIE_RST_CTRL);

	/* Set up vendor ID and class code */
	if (soc->need_fix_class_id) {
		val = PCI_VENDOR_ID_MEDIATEK;
		writew(val, port->base + PCIE_CONF_VEND_ID);

		val = PCI_CLASS_BRIDGE_PCI;
		writew(val, port->base + PCIE_CONF_CLASS_ID);
	}

	if (soc->need_fix_device_id)
		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
				 !!(val & PCIE_PORT_LINKUP_V2), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* Set INTx mask */
	val = readl(port->base + PCIE_INT_MASK);
	val &= ~INTX_MASK;
	writel(val, port->base + PCIE_INT_MASK);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		mtk_pcie_enable_msi(port);

	/* Set AHB to PCIe translation windows */
	val = lower_32_bits(mem->start) |
	      AHB2PCIE_SIZE(fls(resource_size(mem)));
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);

	val = upper_32_bits(mem->start);
	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);

	/* Set PCIe to AXI translation memory space.*/
	val = PCIE2AHB_SIZE | WIN_ENABLE;
	writel(val, port->base + PCIE_AXI_WINDOW0);

	return 0;
}
  623. static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
  624. unsigned int devfn, int where)
  625. {
  626. struct mtk_pcie *pcie = bus->sysdata;
  627. writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
  628. bus->number), pcie->base + PCIE_CFG_ADDR);
  629. return pcie->base + PCIE_CFG_DATA + (where & 3);
  630. }
  631. static struct pci_ops mtk_pcie_ops = {
  632. .map_bus = mtk_pcie_map_bus,
  633. .read = pci_generic_config_read,
  634. .write = pci_generic_config_write,
  635. };
/*
 * Bring up a V1-generation port: toggle PERST#, wait for link training,
 * enable the port interrupt, and program BAR0 mapping, class code, FC
 * credits and FTS via the indirect config window.
 */
static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	u32 func = PCI_FUNC(port->slot);
	u32 slot = PCI_SLOT(port->slot << 3);
	u32 val;
	int err;

	/* assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val |= PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* de-assert port PERST_N */
	val = readl(pcie->base + PCIE_SYS_CFG);
	val &= ~PCIE_PORT_PERST(port->slot);
	writel(val, pcie->base + PCIE_SYS_CFG);

	/* 100ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 100 * USEC_PER_MSEC);
	if (err)
		return -ETIMEDOUT;

	/* enable interrupt */
	val = readl(pcie->base + PCIE_INT_ENABLE);
	val |= PCIE_PORT_INT_EN(port->slot);
	writel(val, pcie->base + PCIE_INT_ENABLE);

	/* map to all DDR region. We need to set it before cfg operation. */
	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
	       port->base + PCIE_BAR0_SETUP);

	/* configure class code and revision ID */
	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);

	/* configure FC credit: read-modify-write via the indirect window */
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FC_CREDIT_MASK;
	val |= PCIE_FC_CREDIT_VAL(0x806c);
	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	/* configure RC FTS number to 250 when it leaves L0s */
	/* NOTE(review): verify PCIE_FTS_NUM_L0() masks/shifts 0x50 into
	 * bits [15:8] as intended (operator precedence in the macro). */
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	val = readl(pcie->base + PCIE_CFG_DATA);
	val &= ~PCIE_FTS_NUM_MASK;
	val |= PCIE_FTS_NUM_L0(0x50);
	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
	       pcie->base + PCIE_CFG_ADDR);
	writel(val, pcie->base + PCIE_CFG_DATA);

	return 0;
}
/*
 * Enable one port: bring up its clocks, cycle its reset, initialize and
 * power the PHY, then run the SoC-specific startup. On any failure (or
 * a link that never comes up) unwind in reverse order and free the port,
 * so a dead port does not block the rest of the host.
 */
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
	struct mtk_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	int err;

	err = clk_prepare_enable(port->sys_ck);
	if (err) {
		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
		goto err_sys_clk;
	}

	err = clk_prepare_enable(port->ahb_ck);
	if (err) {
		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
		goto err_ahb_clk;
	}

	err = clk_prepare_enable(port->aux_ck);
	if (err) {
		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
		goto err_aux_clk;
	}

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}

	err = clk_prepare_enable(port->obff_ck);
	if (err) {
		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
		goto err_obff_clk;
	}

	err = clk_prepare_enable(port->pipe_ck);
	if (err) {
		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
		goto err_pipe_clk;
	}

	/* Pulse the port reset before touching the PHY */
	reset_control_assert(port->reset);
	reset_control_deassert(port->reset);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", port->slot);
		goto err_phy_on;
	}

	/* SoC-specific bring-up; zero return means the link is up */
	if (!pcie->soc->startup(port))
		return;

	dev_info(dev, "Port%d link down\n", port->slot);

	phy_power_off(port->phy);
err_phy_on:
	phy_exit(port->phy);
err_phy_init:
	clk_disable_unprepare(port->pipe_ck);
err_pipe_clk:
	clk_disable_unprepare(port->obff_ck);
err_obff_clk:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	clk_disable_unprepare(port->aux_ck);
err_aux_clk:
	clk_disable_unprepare(port->ahb_ck);
err_ahb_clk:
	clk_disable_unprepare(port->sys_ck);
err_sys_clk:
	mtk_pcie_port_free(port);
}
  754. static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
  755. struct device_node *node,
  756. int slot)
  757. {
  758. struct mtk_pcie_port *port;
  759. struct device *dev = pcie->dev;
  760. struct platform_device *pdev = to_platform_device(dev);
  761. char name[10];
  762. int err;
  763. port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
  764. if (!port)
  765. return -ENOMEM;
  766. snprintf(name, sizeof(name), "port%d", slot);
  767. port->base = devm_platform_ioremap_resource_byname(pdev, name);
  768. if (IS_ERR(port->base)) {
  769. dev_err(dev, "failed to map port%d base\n", slot);
  770. return PTR_ERR(port->base);
  771. }
  772. snprintf(name, sizeof(name), "sys_ck%d", slot);
  773. port->sys_ck = devm_clk_get(dev, name);
  774. if (IS_ERR(port->sys_ck)) {
  775. dev_err(dev, "failed to get sys_ck%d clock\n", slot);
  776. return PTR_ERR(port->sys_ck);
  777. }
  778. /* sys_ck might be divided into the following parts in some chips */
  779. snprintf(name, sizeof(name), "ahb_ck%d", slot);
  780. port->ahb_ck = devm_clk_get_optional(dev, name);
  781. if (IS_ERR(port->ahb_ck))
  782. return PTR_ERR(port->ahb_ck);
  783. snprintf(name, sizeof(name), "axi_ck%d", slot);
  784. port->axi_ck = devm_clk_get_optional(dev, name);
  785. if (IS_ERR(port->axi_ck))
  786. return PTR_ERR(port->axi_ck);
  787. snprintf(name, sizeof(name), "aux_ck%d", slot);
  788. port->aux_ck = devm_clk_get_optional(dev, name);
  789. if (IS_ERR(port->aux_ck))
  790. return PTR_ERR(port->aux_ck);
  791. snprintf(name, sizeof(name), "obff_ck%d", slot);
  792. port->obff_ck = devm_clk_get_optional(dev, name);
  793. if (IS_ERR(port->obff_ck))
  794. return PTR_ERR(port->obff_ck);
  795. snprintf(name, sizeof(name), "pipe_ck%d", slot);
  796. port->pipe_ck = devm_clk_get_optional(dev, name);
  797. if (IS_ERR(port->pipe_ck))
  798. return PTR_ERR(port->pipe_ck);
  799. snprintf(name, sizeof(name), "pcie-rst%d", slot);
  800. port->reset = devm_reset_control_get_optional_exclusive(dev, name);
  801. if (PTR_ERR(port->reset) == -EPROBE_DEFER)
  802. return PTR_ERR(port->reset);
  803. /* some platforms may use default PHY setting */
  804. snprintf(name, sizeof(name), "pcie-phy%d", slot);
  805. port->phy = devm_phy_optional_get(dev, name);
  806. if (IS_ERR(port->phy))
  807. return PTR_ERR(port->phy);
  808. port->slot = slot;
  809. port->pcie = pcie;
  810. if (pcie->soc->setup_irq) {
  811. err = pcie->soc->setup_irq(port, node);
  812. if (err)
  813. return err;
  814. }
  815. INIT_LIST_HEAD(&port->list);
  816. list_add_tail(&port->list, &pcie->ports);
  817. return 0;
  818. }
  819. static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
  820. {
  821. struct device *dev = pcie->dev;
  822. struct platform_device *pdev = to_platform_device(dev);
  823. struct resource *regs;
  824. struct device_node *cfg_node;
  825. int err;
  826. /* get shared registers, which are optional */
  827. regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
  828. if (regs) {
  829. pcie->base = devm_ioremap_resource(dev, regs);
  830. if (IS_ERR(pcie->base))
  831. return PTR_ERR(pcie->base);
  832. }
  833. cfg_node = of_find_compatible_node(NULL, NULL,
  834. "mediatek,generic-pciecfg");
  835. if (cfg_node) {
  836. pcie->cfg = syscon_node_to_regmap(cfg_node);
  837. of_node_put(cfg_node);
  838. if (IS_ERR(pcie->cfg))
  839. return PTR_ERR(pcie->cfg);
  840. }
  841. pcie->free_ck = devm_clk_get(dev, "free_ck");
  842. if (IS_ERR(pcie->free_ck)) {
  843. if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
  844. return -EPROBE_DEFER;
  845. pcie->free_ck = NULL;
  846. }
  847. pm_runtime_enable(dev);
  848. pm_runtime_get_sync(dev);
  849. /* enable top level clock */
  850. err = clk_prepare_enable(pcie->free_ck);
  851. if (err) {
  852. dev_err(dev, "failed to enable free_ck\n");
  853. goto err_free_ck;
  854. }
  855. return 0;
  856. err_free_ck:
  857. pm_runtime_put_sync(dev);
  858. pm_runtime_disable(dev);
  859. return err;
  860. }
  861. static int mtk_pcie_setup(struct mtk_pcie *pcie)
  862. {
  863. struct device *dev = pcie->dev;
  864. struct device_node *node = dev->of_node, *child;
  865. struct mtk_pcie_port *port, *tmp;
  866. int err, slot;
  867. slot = of_get_pci_domain_nr(dev->of_node);
  868. if (slot < 0) {
  869. for_each_available_child_of_node(node, child) {
  870. err = of_pci_get_devfn(child);
  871. if (err < 0) {
  872. dev_err(dev, "failed to get devfn: %d\n", err);
  873. goto error_put_node;
  874. }
  875. slot = PCI_SLOT(err);
  876. err = mtk_pcie_parse_port(pcie, child, slot);
  877. if (err)
  878. goto error_put_node;
  879. }
  880. } else {
  881. err = mtk_pcie_parse_port(pcie, node, slot);
  882. if (err)
  883. return err;
  884. }
  885. err = mtk_pcie_subsys_powerup(pcie);
  886. if (err)
  887. return err;
  888. /* enable each port, and then check link status */
  889. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  890. mtk_pcie_enable_port(port);
  891. /* power down PCIe subsys if slots are all empty (link down) */
  892. if (list_empty(&pcie->ports))
  893. mtk_pcie_subsys_powerdown(pcie);
  894. return 0;
  895. error_put_node:
  896. of_node_put(child);
  897. return err;
  898. }
  899. static int mtk_pcie_probe(struct platform_device *pdev)
  900. {
  901. struct device *dev = &pdev->dev;
  902. struct mtk_pcie *pcie;
  903. struct pci_host_bridge *host;
  904. int err;
  905. host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
  906. if (!host)
  907. return -ENOMEM;
  908. pcie = pci_host_bridge_priv(host);
  909. pcie->dev = dev;
  910. pcie->soc = of_device_get_match_data(dev);
  911. platform_set_drvdata(pdev, pcie);
  912. INIT_LIST_HEAD(&pcie->ports);
  913. err = mtk_pcie_setup(pcie);
  914. if (err)
  915. return err;
  916. host->ops = pcie->soc->ops;
  917. host->sysdata = pcie;
  918. host->msi_domain = pcie->soc->no_msi;
  919. err = pci_host_probe(host);
  920. if (err)
  921. goto put_resources;
  922. return 0;
  923. put_resources:
  924. if (!list_empty(&pcie->ports))
  925. mtk_pcie_put_resources(pcie);
  926. return err;
  927. }
  928. static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
  929. {
  930. struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
  931. struct list_head *windows = &host->windows;
  932. pci_free_resource_list(windows);
  933. }
/*
 * Driver removal: stop and remove the PCI tree first, then release the
 * bridge windows, tear down the IRQ setup and drop the port resources.
 */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	/* Quiesce the bus before freeing anything it may still use. */
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);

	mtk_pcie_free_resources(pcie);

	mtk_pcie_irq_teardown(pcie);

	mtk_pcie_put_resources(pcie);

	return 0;
}
  945. static int mtk_pcie_suspend_noirq(struct device *dev)
  946. {
  947. struct mtk_pcie *pcie = dev_get_drvdata(dev);
  948. struct mtk_pcie_port *port;
  949. if (list_empty(&pcie->ports))
  950. return 0;
  951. list_for_each_entry(port, &pcie->ports, list) {
  952. clk_disable_unprepare(port->pipe_ck);
  953. clk_disable_unprepare(port->obff_ck);
  954. clk_disable_unprepare(port->axi_ck);
  955. clk_disable_unprepare(port->aux_ck);
  956. clk_disable_unprepare(port->ahb_ck);
  957. clk_disable_unprepare(port->sys_ck);
  958. phy_power_off(port->phy);
  959. phy_exit(port->phy);
  960. }
  961. clk_disable_unprepare(pcie->free_ck);
  962. return 0;
  963. }
  964. static int mtk_pcie_resume_noirq(struct device *dev)
  965. {
  966. struct mtk_pcie *pcie = dev_get_drvdata(dev);
  967. struct mtk_pcie_port *port, *tmp;
  968. if (list_empty(&pcie->ports))
  969. return 0;
  970. clk_prepare_enable(pcie->free_ck);
  971. list_for_each_entry_safe(port, tmp, &pcie->ports, list)
  972. mtk_pcie_enable_port(port);
  973. /* In case of EP was removed while system suspend. */
  974. if (list_empty(&pcie->ports))
  975. clk_disable_unprepare(pcie->free_ck);
  976. return 0;
  977. }
/* System sleep callbacks, run in the noirq suspend/resume phase. */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};
/* MT2701/MT7623: v1 config accessors and startup, no MSI support. */
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
	.no_msi = true,
	.ops = &mtk_pcie_ops,
	.startup = mtk_pcie_startup_port,
};

/* MT2712: v2 accessors/startup with per-port interrupt setup. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/* MT7622: as MT2712, but the class ID must be fixed up. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
	.need_fix_class_id = true,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};

/* MT7629: needs both class ID and device ID fixups. */
static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
	.need_fix_class_id = true,
	.need_fix_device_id = true,
	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
	.ops = &mtk_pcie_ops_v2,
	.startup = mtk_pcie_startup_port_v2,
	.setup_irq = mtk_pcie_setup_irq,
};
/* DT match table: maps each compatible to its per-SoC configuration. */
static const struct of_device_id mtk_pcie_ids[] = {
	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_ids,
		/* disallow manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
		.pm = &mtk_pcie_pm_ops,
	},
};
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");