  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe host bridge driver for Apple system-on-chips.
  4. *
  5. * The HW is ECAM compliant, so once the controller is initialized,
  6. * the driver mostly deals MSI mapping and handling of per-port
  7. * interrupts (INTx, management and error signals).
  8. *
  9. * Initialization requires enabling power and clocks, along with a
  10. * number of register pokes.
  11. *
  12. * Copyright (C) 2021 Alyssa Rosenzweig <[email protected]>
  13. * Copyright (C) 2021 Google LLC
  14. * Copyright (C) 2021 Corellium LLC
  15. * Copyright (C) 2021 Mark Kettenis <[email protected]>
  16. *
  17. * Author: Alyssa Rosenzweig <[email protected]>
  18. * Author: Marc Zyngier <[email protected]>
  19. */
  20. #include <linux/gpio/consumer.h>
  21. #include <linux/kernel.h>
  22. #include <linux/iopoll.h>
  23. #include <linux/irqchip/chained_irq.h>
  24. #include <linux/irqdomain.h>
  25. #include <linux/list.h>
  26. #include <linux/module.h>
  27. #include <linux/msi.h>
  28. #include <linux/notifier.h>
  29. #include <linux/of_irq.h>
  30. #include <linux/pci-ecam.h>
/* "Core" (root complex / PHY) registers, offsets into pcie->base */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
#define CORE_RC_PHYIF_STAT_REFCLK BIT(4)
#define CORE_RC_CTL 0x00050
#define CORE_RC_CTL_RUN BIT(0)
#define CORE_RC_STAT 0x00058
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F

/* Per-port lane configuration/control, one 0x4000-sized block per port */
#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
#define CORE_LANE_CTL_CFGACC BIT(15)

/* Per-port registers, offsets into each port's register block */
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)

/* PORT_INT_* values are bit numbers (not masks) in PORT_INTSTAT/INTMSK* */
#define PORT_INTSTAT 0x00100
#define PORT_INT_TUNNEL_ERR 31
#define PORT_INT_CPL_TIMEOUT 23
#define PORT_INT_RID2SID_MAPERR 22
#define PORT_INT_CPL_ABORT 21
#define PORT_INT_MSI_BAD_DATA 19
#define PORT_INT_MSI_ERR 18
#define PORT_INT_REQADDR_GT32 17
#define PORT_INT_AF_TIMEOUT 15
#define PORT_INT_LINK_DOWN 14
#define PORT_INT_LINK_UP 12
#define PORT_INT_LINK_BWMGMT 11
#define PORT_INT_AER_MASK (15 << 4)
#define PORT_INT_PORT_ERR 4
#define PORT_INT_INTx(i) i
#define PORT_INT_INTx_MASK 15
#define PORT_INTMSK 0x00104
#define PORT_INTMSKSET 0x00108
#define PORT_INTMSKCLR 0x0010c
#define PORT_MSICFG 0x00124
#define PORT_MSICFG_EN BIT(0)
#define PORT_MSICFG_L2MSINUM_SHIFT 4
#define PORT_MSIBASE 0x00128
#define PORT_MSIBASE_1_SHIFT 16
#define PORT_MSIADDR 0x00168
#define PORT_LINKSTS 0x00208
#define PORT_LINKSTS_UP BIT(0)
#define PORT_LINKSTS_BUSY BIT(2)
#define PORT_LINKCMDSTS 0x00210
#define PORT_OUTS_NPREQS 0x00284
#define PORT_OUTS_NPREQS_REQ BIT(24)
#define PORT_OUTS_NPREQS_CPL BIT(16)
#define PORT_RXWR_FIFO 0x00288
#define PORT_RXWR_FIFO_HDR GENMASK(15, 10)
#define PORT_RXWR_FIFO_DATA GENMASK(9, 0)
#define PORT_RXRD_FIFO 0x0028C
#define PORT_RXRD_FIFO_REQ GENMASK(6, 0)
#define PORT_OUTS_CPLS 0x00290
#define PORT_OUTS_CPLS_SHRD GENMASK(14, 8)
#define PORT_OUTS_CPLS_WAIT GENMASK(6, 0)
#define PORT_APPCLK 0x00800
#define PORT_APPCLK_EN BIT(0)
#define PORT_APPCLK_CGDIS BIT(8)
#define PORT_STATUS 0x00804
#define PORT_STATUS_READY BIT(0)
#define PORT_REFCLK 0x00810
#define PORT_REFCLK_EN BIT(0)
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)

/* RID -> SID translation table (one 32-bit entry per mapped RID) */
#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
#define PORT_RID2SID_DEV_SHIFT 3
#define PORT_RID2SID_FUNC_SHIFT 0

#define PORT_OUTS_PREQS_HDR 0x00980
#define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA 0x00984
#define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0)
#define PORT_TUNCTRL 0x00988
#define PORT_TUNCTRL_PERST_ON BIT(0)
#define PORT_TUNCTRL_PERST_ACK_REQ BIT(1)
#define PORT_TUNSTAT 0x0098c
#define PORT_TUNSTAT_PERST_ON BIT(0)
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994

/* Maximum number of RID2SID table entries probed at port setup */
#define MAX_RID2SID 64

/*
 * The doorbell address is set to 0xfffff000, which by convention
 * matches what MacOS does, and it is possible to use any other
 * address (in the bottom 4GB, as the base register is only 32bit).
 * However, it has to be excluded from the IOVA range, and the DART
 * driver has to know about it.
 */
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
/* Controller-wide state, shared by all root ports. */
struct apple_pcie {
	struct mutex		lock;	/* protects @bitmap and RID2SID updates */
	struct device		*dev;
	void __iomem		*base;	/* "core" register block */
	struct irq_domain	*domain; /* MSI IRQ domain */
	unsigned long		*bitmap; /* allocator for the @nvecs MSI vectors */
	struct list_head	ports;	/* list of struct apple_pcie_port */
	struct completion	event;	/* completed by the link-up interrupt */
	struct irq_fwspec	fwspec;	/* template fwspec for the parent domain */
	u32			nvecs;	/* number of MSI vectors ("msi-ranges") */
};

/* Per-root-port state. */
struct apple_pcie_port {
	struct apple_pcie	*pcie;	/* owning controller */
	struct device_node	*np;	/* port DT node */
	void __iomem		*base;	/* port register block */
	struct irq_domain	*domain; /* per-port (INTx + event) IRQ domain */
	struct list_head	entry;	/* link in apple_pcie::ports */
	DECLARE_BITMAP(sid_map, MAX_RID2SID); /* in-use RID2SID table slots */
	int			sid_map_sz; /* number of usable RID2SID entries */
	int			idx;	/* port index (from first "reg" entry) */
};
  148. static void rmw_set(u32 set, void __iomem *addr)
  149. {
  150. writel_relaxed(readl_relaxed(addr) | set, addr);
  151. }
  152. static void rmw_clear(u32 clr, void __iomem *addr)
  153. {
  154. writel_relaxed(readl_relaxed(addr) & ~clr, addr);
  155. }
/*
 * Mask an MSI at both levels of the hierarchy: the PCI device's own
 * mask bit, then the parent interrupt controller input.
 */
static void apple_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

/* Unmask counterpart of apple_msi_top_irq_mask(). */
static void apple_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
/* irq_chip for the PCI-facing (top) level of the MSI hierarchy. */
static struct irq_chip apple_msi_top_chip = {
	.name			= "PCIe MSI",
	.irq_mask		= apple_msi_top_irq_mask,
	.irq_unmask		= apple_msi_top_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
};
  174. static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
  175. {
  176. msg->address_hi = upper_32_bits(DOORBELL_ADDR);
  177. msg->address_lo = lower_32_bits(DOORBELL_ADDR);
  178. msg->data = data->hwirq;
  179. }
/*
 * irq_chip for the bottom level of the MSI hierarchy; mostly forwards
 * to the parent, but composes the MSI message itself.
 */
static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};
  189. static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
  190. unsigned int nr_irqs, void *args)
  191. {
  192. struct apple_pcie *pcie = domain->host_data;
  193. struct irq_fwspec fwspec = pcie->fwspec;
  194. unsigned int i;
  195. int ret, hwirq;
  196. mutex_lock(&pcie->lock);
  197. hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
  198. order_base_2(nr_irqs));
  199. mutex_unlock(&pcie->lock);
  200. if (hwirq < 0)
  201. return -ENOSPC;
  202. fwspec.param[fwspec.param_count - 2] += hwirq;
  203. ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
  204. if (ret)
  205. return ret;
  206. for (i = 0; i < nr_irqs; i++) {
  207. irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
  208. &apple_msi_bottom_chip,
  209. domain->host_data);
  210. }
  211. return 0;
  212. }
/* Return the block of MSI vectors backing @virq to the bitmap. */
static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);
	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&pcie->lock);
}
static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};

/* Advertise multi-MSI and MSI-X support on top of the default ops. */
static struct msi_domain_info apple_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &apple_msi_top_chip,
};
/* Mask a per-port interrupt via the write-to-set mask register. */
static void apple_port_irq_mask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
}

/* Unmask a per-port interrupt via the write-to-clear mask register. */
static void apple_port_irq_unmask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
}
  241. static bool hwirq_is_intx(unsigned int hwirq)
  242. {
  243. return BIT(hwirq) & PORT_INT_INTx_MASK;
  244. }
static void apple_port_irq_ack(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	/*
	 * INTx are handled as level interrupts and have nothing to ack
	 * here; everything else is edge and is acked by writing its
	 * bit back to the status register.
	 */
	if (!hwirq_is_intx(data->hwirq))
		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
}
  251. static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
  252. {
  253. /*
  254. * It doesn't seem that there is any way to configure the
  255. * trigger, so assume INTx have to be level (as per the spec),
  256. * and the rest is edge (which looks likely).
  257. */
  258. if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
  259. return -EINVAL;
  260. irqd_set_trigger_type(data, type);
  261. return 0;
  262. }
/* irq_chip for the per-port (INTx + management/error) interrupts. */
static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};
  270. static int apple_port_irq_domain_alloc(struct irq_domain *domain,
  271. unsigned int virq, unsigned int nr_irqs,
  272. void *args)
  273. {
  274. struct apple_pcie_port *port = domain->host_data;
  275. struct irq_fwspec *fwspec = args;
  276. int i;
  277. for (i = 0; i < nr_irqs; i++) {
  278. irq_flow_handler_t flow = handle_edge_irq;
  279. unsigned int type = IRQ_TYPE_EDGE_RISING;
  280. if (hwirq_is_intx(fwspec->param[0] + i)) {
  281. flow = handle_level_irq;
  282. type = IRQ_TYPE_LEVEL_HIGH;
  283. }
  284. irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
  285. &apple_port_irqchip, port, flow,
  286. NULL, NULL);
  287. irq_set_irq_type(virq + i, type);
  288. }
  289. return 0;
  290. }
  291. static void apple_port_irq_domain_free(struct irq_domain *domain,
  292. unsigned int virq, unsigned int nr_irqs)
  293. {
  294. int i;
  295. for (i = 0; i < nr_irqs; i++) {
  296. struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
  297. irq_set_handler(virq + i, NULL);
  298. irq_domain_reset_irq_data(d);
  299. }
  300. }
/* Per-port IRQ domain: one cell, the raw PORT_INT_* bit number. */
static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
/*
 * Chained handler for a port's summary interrupt: demultiplex the
 * 32-bit port status register into the per-port IRQ domain.
 */
static void apple_port_irq_handler(struct irq_desc *desc)
{
	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	int i;

	chained_irq_enter(chip, desc);

	stat = readl_relaxed(port->base + PORT_INTSTAT);

	for_each_set_bit(i, &stat, 32)
		generic_handle_domain_irq(port->domain, i);

	chained_irq_exit(chip, desc);
}
/*
 * Set up a port's interrupt machinery: map its summary interrupt
 * (taken from the controller node, indexed by port), create the
 * 32-entry per-port domain, quiesce pending state, install the
 * chained handler and enable MSI generation.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	unsigned int irq;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSKSET);
	/* ... and clear any stale status before the handler is installed */
	writel_relaxed(~0, port->base + PORT_INTSTAT);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);

	/* Enable MSIs, shared between all ports */
	writel_relaxed(0, port->base + PORT_MSIBASE);
	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
		       PORT_MSICFG_EN, port->base + PORT_MSICFG);

	return 0;
}
  345. static irqreturn_t apple_pcie_port_irq(int irq, void *data)
  346. {
  347. struct apple_pcie_port *port = data;
  348. unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
  349. switch (hwirq) {
  350. case PORT_INT_LINK_UP:
  351. dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
  352. port->np);
  353. complete_all(&port->pcie->event);
  354. break;
  355. case PORT_INT_LINK_DOWN:
  356. dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
  357. port->np);
  358. break;
  359. default:
  360. return IRQ_NONE;
  361. }
  362. return IRQ_HANDLED;
  363. }
  364. static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
  365. {
  366. static struct {
  367. unsigned int hwirq;
  368. const char *name;
  369. } port_irqs[] = {
  370. { PORT_INT_LINK_UP, "Link up", },
  371. { PORT_INT_LINK_DOWN, "Link down", },
  372. };
  373. int i;
  374. for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
  375. struct irq_fwspec fwspec = {
  376. .fwnode = &port->np->fwnode,
  377. .param_count = 1,
  378. .param = {
  379. [0] = port_irqs[i].hwirq,
  380. },
  381. };
  382. unsigned int irq;
  383. int ret;
  384. irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
  385. &fwspec);
  386. if (WARN_ON(!irq))
  387. continue;
  388. ret = request_irq(irq, apple_pcie_port_irq, 0,
  389. port_irqs[i].name, port);
  390. WARN_ON(ret);
  391. }
  392. return 0;
  393. }
/*
 * Bring up the reference clock for @port: wait for the PHY refclk to
 * be reported, perform the REFCLK0/REFCLK1 request/ack handshake with
 * the lane config access window open, then enable the clock at both
 * the core and the port level.
 *
 * Returns 0 on success or a negative errno on a poll timeout.
 */
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Open the lane config access window for the handshake */
	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1ACK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Close the access window and enable the clock */
	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}
  422. static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
  423. int idx, u32 val)
  424. {
  425. writel_relaxed(val, port->base + PORT_RID2SID(idx));
  426. /* Read back to ensure completion of the write */
  427. return readl_relaxed(port->base + PORT_RID2SID(idx));
  428. }
/*
 * Bring up a single root port: cycle PERST# with the spec-mandated
 * timings, wait for the port to report ready, set up interrupts and
 * the RID2SID table, then start link training.
 *
 * Returns 0 on success (even if the link doesn't come up) or a
 * negative errno on a hard failure.
 */
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	u32 stat, idx;
	int ret, i;

	reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
				      GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	/* NOTE(review): np is stored without of_node_get() — confirm lifetime */
	port->np = np;

	/* Port register blocks start at platform resource 2 */
	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
	gpiod_set_value(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	/* Port is up: allow the clocks to be gated again */
	rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < MAX_RID2SID; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	/* Start link training and wait (briefly) for the link-up IRQ */
	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}
/*
 * Parse the "msi-ranges" property, allocate the MSI vector bitmap and
 * build the hierarchical MSI domain on top of the parent wired
 * interrupt domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct of_phandle_args args = {};
	struct irq_domain *parent;
	int ret;

	/* First part of "msi-ranges": the parent interrupt specifier */
	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	/* The cell after the specifier holds the number of vectors */
	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	/* Keep a template fwspec; the alloc path offsets its hwirq cell */
	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);
	/* NOTE(review): the args.np reference is never dropped — confirm
	 * whether an of_node_put() is needed on these paths. */

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
					     &apple_msi_domain_ops, pcie);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
						 parent);
	if (!pcie->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}
  535. static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
  536. {
  537. struct pci_config_window *cfg = pdev->sysdata;
  538. struct apple_pcie *pcie = cfg->priv;
  539. struct pci_dev *port_pdev;
  540. struct apple_pcie_port *port;
  541. /* Find the root port this device is on */
  542. port_pdev = pcie_find_root_port(pdev);
  543. /* If finding the port itself, nothing to do */
  544. if (WARN_ON(!port_pdev) || pdev == port_pdev)
  545. return NULL;
  546. list_for_each_entry(port, &pcie->ports, entry) {
  547. if (port->idx == PCI_SLOT(port_pdev->devfn))
  548. return port;
  549. }
  550. return NULL;
  551. }
  552. static int apple_pcie_add_device(struct apple_pcie_port *port,
  553. struct pci_dev *pdev)
  554. {
  555. u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
  556. int idx, err;
  557. dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
  558. pci_name(pdev->bus->self), port->idx);
  559. err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
  560. "iommu-map-mask", NULL, &sid);
  561. if (err)
  562. return err;
  563. mutex_lock(&port->pcie->lock);
  564. idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
  565. if (idx >= 0) {
  566. apple_pcie_rid2sid_write(port, idx,
  567. PORT_RID2SID_VALID |
  568. (sid << PORT_RID2SID_SID_SHIFT) | rid);
  569. dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
  570. rid, sid, idx);
  571. }
  572. mutex_unlock(&port->pcie->lock);
  573. return idx >= 0 ? 0 : -ENOSPC;
  574. }
/*
 * Tear down the RID2SID mapping installed by apple_pcie_add_device(),
 * matching the entry on the RID in its low 16 bits.
 */
static void apple_pcie_release_device(struct apple_pcie_port *port,
				      struct pci_dev *pdev)
{
	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
	int idx;

	mutex_lock(&port->pcie->lock);

	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
		u32 val;

		val = readl_relaxed(port->base + PORT_RID2SID(idx));
		if ((val & 0xffff) == rid) {
			apple_pcie_rid2sid_write(port, idx, 0);
			bitmap_release_region(port->sid_map, idx, 0);
			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
			break;
		}
	}

	mutex_unlock(&port->pcie->lock);
}
  593. static int apple_pcie_bus_notifier(struct notifier_block *nb,
  594. unsigned long action,
  595. void *data)
  596. {
  597. struct device *dev = data;
  598. struct pci_dev *pdev = to_pci_dev(dev);
  599. struct apple_pcie_port *port;
  600. int err;
  601. /*
  602. * This is a bit ugly. We assume that if we get notified for
  603. * any PCI device, we must be in charge of it, and that there
  604. * is no other PCI controller in the whole system. It probably
  605. * holds for now, but who knows for how long?
  606. */
  607. port = apple_pcie_get_port(pdev);
  608. if (!port)
  609. return NOTIFY_DONE;
  610. switch (action) {
  611. case BUS_NOTIFY_ADD_DEVICE:
  612. err = apple_pcie_add_device(port, pdev);
  613. if (err)
  614. return notifier_from_errno(err);
  615. break;
  616. case BUS_NOTIFY_DEL_DEVICE:
  617. apple_pcie_release_device(port, pdev);
  618. break;
  619. default:
  620. return NOTIFY_DONE;
  621. }
  622. return NOTIFY_OK;
  623. }
/* Registered on the PCI bus type in apple_pcie_probe(). */
static struct notifier_block apple_pcie_nb = {
	.notifier_call = apple_pcie_bus_notifier,
};
/*
 * ECAM init callback: map the core registers, set up the MSI domain
 * and bring up every port described as a child node in the device
 * tree. Returns 0 on success or a negative errno.
 */
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct device_node *of_port;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	/* The "core" (RC) register block is platform resource 1 */
	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	ret = apple_msi_init(pcie);
	if (ret)
		return ret;

	for_each_child_of_node(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			/* drop the iterator's reference before bailing out */
			of_node_put(of_port);
			return ret;
		}
	}

	return 0;
}
  657. static int apple_pcie_probe(struct platform_device *pdev)
  658. {
  659. int ret;
  660. ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
  661. if (ret)
  662. return ret;
  663. ret = pci_host_common_probe(pdev);
  664. if (ret)
  665. bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
  666. return ret;
  667. }
/* ECAM-compliant config space: generic accessors plus our init hook. */
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);

static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_LICENSE("GPL v2");