pcie-layerscape-gen4.c (6.1 KB)
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe Gen4 host controller driver for NXP Layerscape SoCs
  4. *
  5. * Copyright 2019-2020 NXP
  6. *
  7. * Author: Zhiqiang Hou <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/init.h>
  12. #include <linux/of_pci.h>
  13. #include <linux/of_platform.h>
  14. #include <linux/of_irq.h>
  15. #include <linux/of_address.h>
  16. #include <linux/pci.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/resource.h>
  19. #include <linux/mfd/syscon.h>
  20. #include <linux/regmap.h>
  21. #include "pcie-mobiveil.h"
/* LUT and PF control registers */
#define PCIE_LUT_OFF		0x80000	/* LUT register block offset in CSR space */
#define PCIE_PF_OFF		0xc0000	/* PF control register block offset in CSR space */

/* PF interrupt status register */
#define PCIE_PF_INT_STAT	0x18
#define PF_INT_STAT_PABRST	BIT(31)	/* set when pab_csb_reset has taken effect */

/* PF debug register (PEX_PF0_DBG) */
#define PCIE_PF_DBG		0x7fc
#define PF_DBG_LTSSM_MASK	0x3f	/* LTSSM state field */
#define PF_DBG_LTSSM_L0		0x2d	/* L0 state */
#define PF_DBG_WE		BIT(31)	/* write-enable gate for the bits below */
#define PF_DBG_PABR		BIT(27)	/* PAB reset (PEX_RESET) control bit */

/* Driver private data is stored via platform_set_drvdata() in probe. */
#define to_ls_g4_pcie(x)	platform_get_drvdata((x)->pdev)
/* Per-controller state for the Layerscape Gen4 (Mobiveil-based) host bridge. */
struct ls_g4_pcie {
	struct mobiveil_pcie pci;	/* embedded Mobiveil core state */
	struct delayed_work dwork;	/* deferred PAB-reset recovery (ls_g4_pcie_reset) */
	int irq;			/* "intr" platform IRQ line */
};
  38. static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off)
  39. {
  40. return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
  41. }
  42. static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
  43. u32 off, u32 val)
  44. {
  45. iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
  46. }
  47. static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
  48. {
  49. struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
  50. u32 state;
  51. state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
  52. state = state & PF_DBG_LTSSM_MASK;
  53. if (state == PF_DBG_LTSSM_L0)
  54. return 1;
  55. return 0;
  56. }
  57. static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
  58. {
  59. struct mobiveil_pcie *mv_pci = &pcie->pci;
  60. mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
  61. }
  62. static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie)
  63. {
  64. struct mobiveil_pcie *mv_pci = &pcie->pci;
  65. u32 val;
  66. /* Clear the interrupt status */
  67. mobiveil_csr_writel(mv_pci, 0xffffffff, PAB_INTP_AMBA_MISC_STAT);
  68. val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
  69. PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
  70. mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
  71. }
/*
 * Re-initialize the controller after a hot reset / PAB reset event.
 *
 * Sequence: wait for the PAB reset to latch and PAB activity to drain,
 * clear the PEX_RESET (PABR) bit under the WE write-enable gate, rerun
 * the common Mobiveil host init, then wait for link training to reach L0.
 *
 * Returns 0 on success, -EIO on either poll timing out.
 * NOTE(review): the ordering of the register accesses below is
 * hardware-mandated; do not reorder.
 */
static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie)
{
	struct mobiveil_pcie *mv_pci = &pcie->pci;
	struct device *dev = &mv_pci->pdev->dev;
	u32 val, act_stat;
	int to = 100;

	/* Poll for pab_csb_reset to set and PAB activity to clear */
	do {
		usleep_range(10, 15);
		val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT);
		act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
	} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
	/* post-decrement leaves 'to' at -1 only when the loop timed out */
	if (to < 0) {
		dev_err(dev, "Poll PABRST&PABACT timeout\n");
		return -EIO;
	}

	/* clear PEX_RESET bit in PEX_PF0_DBG register */
	/* Step 1: open the write-enable gate */
	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_WE;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	/* Step 2: assert PABR with the gate open */
	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val |= PF_DBG_PABR;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	/* Step 3: close the write-enable gate again */
	val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
	val &= ~PF_DBG_WE;
	ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);

	/* Redo the common host setup (inbound/outbound windows, etc.) */
	mobiveil_host_init(mv_pci, true);

	/* Wait up to ~25 ms for the LTSSM to reach L0 */
	to = 100;
	while (!ls_g4_pcie_link_up(mv_pci) && to--)
		usleep_range(200, 250);
	if (to < 0) {
		dev_err(dev, "PCIe link training timeout\n");
		return -EIO;
	}

	return 0;
}
  108. static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id)
  109. {
  110. struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id;
  111. struct mobiveil_pcie *mv_pci = &pcie->pci;
  112. u32 val;
  113. val = mobiveil_csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
  114. if (!val)
  115. return IRQ_NONE;
  116. if (val & PAB_INTP_RESET) {
  117. ls_g4_pcie_disable_interrupt(pcie);
  118. schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
  119. }
  120. mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
  121. return IRQ_HANDLED;
  122. }
  123. static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
  124. {
  125. struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci);
  126. struct platform_device *pdev = mv_pci->pdev;
  127. struct device *dev = &pdev->dev;
  128. int ret;
  129. pcie->irq = platform_get_irq_byname(pdev, "intr");
  130. if (pcie->irq < 0)
  131. return pcie->irq;
  132. ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr,
  133. IRQF_SHARED, pdev->name, pcie);
  134. if (ret) {
  135. dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
  136. return ret;
  137. }
  138. return 0;
  139. }
  140. static void ls_g4_pcie_reset(struct work_struct *work)
  141. {
  142. struct delayed_work *dwork = container_of(work, struct delayed_work,
  143. work);
  144. struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
  145. struct mobiveil_pcie *mv_pci = &pcie->pci;
  146. u16 ctrl;
  147. ctrl = mobiveil_csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
  148. ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
  149. mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
  150. if (!ls_g4_pcie_reinit_hw(pcie))
  151. return;
  152. ls_g4_pcie_enable_interrupt(pcie);
  153. }
/* Root-port hooks consumed by the common Mobiveil host code. */
static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
	.interrupt_init = ls_g4_pcie_interrupt_init,
};

/* PAB-level hooks; link_up is used by the common link-training wait. */
static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = {
	.link_up = ls_g4_pcie_link_up,
};
  160. static int __init ls_g4_pcie_probe(struct platform_device *pdev)
  161. {
  162. struct device *dev = &pdev->dev;
  163. struct pci_host_bridge *bridge;
  164. struct mobiveil_pcie *mv_pci;
  165. struct ls_g4_pcie *pcie;
  166. struct device_node *np = dev->of_node;
  167. int ret;
  168. if (!of_parse_phandle(np, "msi-parent", 0)) {
  169. dev_err(dev, "Failed to find msi-parent\n");
  170. return -EINVAL;
  171. }
  172. bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
  173. if (!bridge)
  174. return -ENOMEM;
  175. pcie = pci_host_bridge_priv(bridge);
  176. mv_pci = &pcie->pci;
  177. mv_pci->pdev = pdev;
  178. mv_pci->ops = &ls_g4_pcie_pab_ops;
  179. mv_pci->rp.ops = &ls_g4_pcie_rp_ops;
  180. mv_pci->rp.bridge = bridge;
  181. platform_set_drvdata(pdev, pcie);
  182. INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset);
  183. ret = mobiveil_pcie_host_probe(mv_pci);
  184. if (ret) {
  185. dev_err(dev, "Fail to probe\n");
  186. return ret;
  187. }
  188. ls_g4_pcie_enable_interrupt(pcie);
  189. return 0;
  190. }
/* Bound by compatible string; currently only the LX2160A uses this IP. */
static const struct of_device_id ls_g4_pcie_of_match[] = {
	{ .compatible = "fsl,lx2160a-pcie", },
	{ },
};

static struct platform_driver ls_g4_pcie_driver = {
	.driver = {
		.name = "layerscape-pcie-gen4",
		.of_match_table = ls_g4_pcie_of_match,
		/* built-in only, no unbind: probe is __init */
		.suppress_bind_attrs = true,
	},
};

/* Registers the driver with the probe routine; no module unload path. */
builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);