  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (c) 2017 Cadence
  3. // Cadence PCIe controller driver.
  4. // Author: Cyrille Pitchen <[email protected]>
  5. #include <linux/kernel.h>
  6. #include "pcie-cadence.h"
  7. void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
  8. {
  9. u32 delay = 0x3;
  10. u32 ltssm_control_cap;
  11. /*
  12. * Set the LTSSM Detect Quiet state min. delay to 2ms.
  13. */
  14. ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
  15. ltssm_control_cap = ((ltssm_control_cap &
  16. ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
  17. CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
  18. cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
  19. }
  20. void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
  21. u32 r, bool is_io,
  22. u64 cpu_addr, u64 pci_addr, size_t size)
  23. {
  24. /*
  25. * roundup_pow_of_two() returns an unsigned long, which is not suited
  26. * for 64bit values.
  27. */
  28. u64 sz = 1ULL << fls64(size - 1);
  29. int nbits = ilog2(sz);
  30. u32 addr0, addr1, desc0, desc1;
  31. if (nbits < 8)
  32. nbits = 8;
  33. /* Set the PCI address */
  34. addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
  35. (lower_32_bits(pci_addr) & GENMASK(31, 8));
  36. addr1 = upper_32_bits(pci_addr);
  37. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
  38. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
  39. /* Set the PCIe header descriptor */
  40. if (is_io)
  41. desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
  42. else
  43. desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
  44. desc1 = 0;
  45. /*
  46. * Whatever Bit [23] is set or not inside DESC0 register of the outbound
  47. * PCIe descriptor, the PCI function number must be set into
  48. * Bits [26:24] of DESC0 anyway.
  49. *
  50. * In Root Complex mode, the function number is always 0 but in Endpoint
  51. * mode, the PCIe controller may support more than one function. This
  52. * function number needs to be set properly into the outbound PCIe
  53. * descriptor.
  54. *
  55. * Besides, setting Bit [23] is mandatory when in Root Complex mode:
  56. * then the driver must provide the bus, resp. device, number in
  57. * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function
  58. * number, the device number is always 0 in Root Complex mode.
  59. *
  60. * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence
  61. * the PCIe controller will use the captured values for the bus and
  62. * device numbers.
  63. */
  64. if (pcie->is_rc) {
  65. /* The device and function numbers are always 0. */
  66. desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
  67. CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
  68. desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
  69. } else {
  70. /*
  71. * Use captured values for bus and device numbers but still
  72. * need to set the function number.
  73. */
  74. desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
  75. }
  76. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
  77. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
  78. /* Set the CPU address */
  79. if (pcie->ops->cpu_addr_fixup)
  80. cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
  81. addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
  82. (lower_32_bits(cpu_addr) & GENMASK(31, 8));
  83. addr1 = upper_32_bits(cpu_addr);
  84. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
  85. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
  86. }
  87. void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
  88. u8 busnr, u8 fn,
  89. u32 r, u64 cpu_addr)
  90. {
  91. u32 addr0, addr1, desc0, desc1;
  92. desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
  93. desc1 = 0;
  94. /* See cdns_pcie_set_outbound_region() comments above. */
  95. if (pcie->is_rc) {
  96. desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
  97. CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
  98. desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
  99. } else {
  100. desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
  101. }
  102. /* Set the CPU address */
  103. if (pcie->ops->cpu_addr_fixup)
  104. cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
  105. addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
  106. (lower_32_bits(cpu_addr) & GENMASK(31, 8));
  107. addr1 = upper_32_bits(cpu_addr);
  108. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
  109. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
  110. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
  111. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
  112. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
  113. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
  114. }
  115. void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
  116. {
  117. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
  118. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
  119. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
  120. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
  121. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
  122. cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
  123. }
  124. void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
  125. {
  126. int i = pcie->phy_count;
  127. while (i--) {
  128. phy_power_off(pcie->phy[i]);
  129. phy_exit(pcie->phy[i]);
  130. }
  131. }
  132. int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
  133. {
  134. int ret;
  135. int i;
  136. for (i = 0; i < pcie->phy_count; i++) {
  137. ret = phy_init(pcie->phy[i]);
  138. if (ret < 0)
  139. goto err_phy;
  140. ret = phy_power_on(pcie->phy[i]);
  141. if (ret < 0) {
  142. phy_exit(pcie->phy[i]);
  143. goto err_phy;
  144. }
  145. }
  146. return 0;
  147. err_phy:
  148. while (--i >= 0) {
  149. phy_power_off(pcie->phy[i]);
  150. phy_exit(pcie->phy[i]);
  151. }
  152. return ret;
  153. }
  154. int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
  155. {
  156. struct device_node *np = dev->of_node;
  157. int phy_count;
  158. struct phy **phy;
  159. struct device_link **link;
  160. int i;
  161. int ret;
  162. const char *name;
  163. phy_count = of_property_count_strings(np, "phy-names");
  164. if (phy_count < 1) {
  165. dev_err(dev, "no phy-names. PHY will not be initialized\n");
  166. pcie->phy_count = 0;
  167. return 0;
  168. }
  169. phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
  170. if (!phy)
  171. return -ENOMEM;
  172. link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
  173. if (!link)
  174. return -ENOMEM;
  175. for (i = 0; i < phy_count; i++) {
  176. of_property_read_string_index(np, "phy-names", i, &name);
  177. phy[i] = devm_phy_get(dev, name);
  178. if (IS_ERR(phy[i])) {
  179. ret = PTR_ERR(phy[i]);
  180. goto err_phy;
  181. }
  182. link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
  183. if (!link[i]) {
  184. devm_phy_put(dev, phy[i]);
  185. ret = -EINVAL;
  186. goto err_phy;
  187. }
  188. }
  189. pcie->phy_count = phy_count;
  190. pcie->phy = phy;
  191. pcie->link = link;
  192. ret = cdns_pcie_enable_phy(pcie);
  193. if (ret)
  194. goto err_phy;
  195. return 0;
  196. err_phy:
  197. while (--i >= 0) {
  198. device_link_del(link[i]);
  199. devm_phy_put(dev, phy[i]);
  200. }
  201. return ret;
  202. }
  203. static int cdns_pcie_suspend_noirq(struct device *dev)
  204. {
  205. struct cdns_pcie *pcie = dev_get_drvdata(dev);
  206. cdns_pcie_disable_phy(pcie);
  207. return 0;
  208. }
  209. static int cdns_pcie_resume_noirq(struct device *dev)
  210. {
  211. struct cdns_pcie *pcie = dev_get_drvdata(dev);
  212. int ret;
  213. ret = cdns_pcie_enable_phy(pcie);
  214. if (ret) {
  215. dev_err(dev, "failed to enable phy\n");
  216. return ret;
  217. }
  218. return 0;
  219. }
/*
 * System sleep PM operations: only the noirq phase is populated, wiring
 * cdns_pcie_suspend_noirq()/cdns_pcie_resume_noirq() above to power the
 * PHYs off on suspend and back on during resume.
 */
const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};