pcie-qcom.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <[email protected]>
 */

#include <linux/bitfield.h>	/* FIELD_PREP(), used by the SLTCAP defines below */
#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"
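
/*
 * The controller exposes three register spaces, used throughout this file:
 * PARF, Qualcomm's wrapper registers around the Synopsys DesignWare core;
 * ELBI, the DesignWare "external local bus interface"; and DBI, the
 * DesignWare core's own configuration space.
 */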

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* PARF_SYS_CTRL register fields */
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				((x) << 24)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x10000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_HPC | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
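
/*
 * Note: SPLV = 250 combined with SPLS = 1 (the 0.1x scale encoding)
 * advertises a slot power limit of 25.0 W in the Slot Capabilities
 * register.
 */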

#define PERST_DELAY_US				1000

#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
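
/* CRC8 polynomial 0x07 (x^8 + x^2 + x + 1), used to hash BDFs into the BDF-to-SID table */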
#define QCOM_PCIE_CRC8_POLYNOMIAL	(BIT(2) | BIT(1) | BIT(0))

struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[12];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
};

struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[5];
	struct reset_control *rst;
};
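
/* Only one member is live per SoC, selected via the matched qcom_pcie_cfg below */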
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;
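
/*
 * Per-IP-revision callbacks. get_resources() runs once at probe time;
 * init() and post_init() bracket the PHY power-on in qcom_pcie_host_init();
 * config_sid() runs after PERST# has been deasserted.
 */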
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	const struct qcom_pcie_cfg *cfg;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
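
/*
 * PERST# is active low at the slot; the "perst" GPIO is expected to carry
 * its polarity in the DT description, so writing the logical value 1 here
 * asserts reset at the endpoint.
 */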
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	return 0;

err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using the default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	return 0;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
}
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);
err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relate to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}

static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i;
	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
				    "axi_m_sticky", "sticky",
				    "ahb", "sleep", };

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->axi_m_clk = devm_clk_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_clk))
		return PTR_ERR(res->axi_m_clk);

	res->axi_s_clk = devm_clk_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_clk))
		return PTR_ERR(res->axi_s_clk);

	res->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(res->ahb_clk))
		return PTR_ERR(res->ahb_clk);

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
		if (IS_ERR(res->rst[i]))
			return PTR_ERR(res->rst[i]);
	}

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_assert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
			return ret;
		}
	}

	usleep_range(2000, 2500);

	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
		ret = reset_control_deassert(res->rst[i]);
		if (ret) {
			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
				ret);
			return ret;
		}
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_clk_iface;
	}

	ret = clk_prepare_enable(res->axi_m_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi master clock\n");
		goto err_clk_axi_m;
	}

	ret = clk_prepare_enable(res->axi_s_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable axi slave clock\n");
		goto err_clk_axi_s;
	}

	ret = clk_prepare_enable(res->ahb_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable ahb clock\n");
		goto err_clk_ahb;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_clk_aux;
	}

	return 0;

err_clk_aux:
	clk_disable_unprepare(res->ahb_clk);
err_clk_ahb:
	clk_disable_unprepare(res->axi_s_clk);
err_clk_axi_s:
	clk_disable_unprepare(res->axi_m_clk);
err_clk_axi_m:
	clk_disable_unprepare(res->iface);
err_clk_iface:
	/*
	 * Not checking for failure; the original failure in 'ret' is
	 * returned anyway.
	 */
	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
		reset_control_assert(res->rst[i]);

	return ret;
}
static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int num_clks, num_opt_clks;
	unsigned int idx;
	int ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";

	num_clks = idx;

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->clks[idx++].id = "tbu";
	res->clks[idx++].id = "ddrss_sf_tbu";
	res->clks[idx++].id = "aggre0";
	res->clks[idx++].id = "aggre1";
	res->clks[idx++].id = "noc_aggr_4";
	res->clks[idx++].id = "noc_aggr_south_sf";
	res->clks[idx++].id = "cnoc_qx";

	num_opt_clks = idx - num_clks;
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
	if (ret < 0)
		return ret;

	return 0;
}
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot assert pci reset\n");
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->pci_reset);
	if (ret < 0) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
		pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);
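
	/* Clear all 256 entries of the BDF-to-SID mapping table */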
	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
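
/*
 * The PARF BDF-to-SID table is a 256-entry hash table. Each 32-bit entry
 * packs BDF [31:16] | SID [15:8] | NEXT [7:0]; entries are indexed by the
 * CRC8 hash of the big-endian BDF, and collisions are chained through the
 * NEXT byte, which holds the index of the following entry.
 */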
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for the next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If the NEXT field is NULL, update it with the next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
	pcie->cfg->ops->deinit(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init	= qcom_pcie_host_init,
	.host_deinit	= qcom_pcie_host_deinit,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
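	/*
	 * pm_runtime_get_sync() raises the usage count even when it fails,
	 * so the error path below still has to call pm_runtime_put().
	 */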
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ }
};
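
/*
 * These root ports advertise an incorrect PCI class code; force the
 * standard PCI-to-PCI bridge class so the PCI core enumerates them
 * correctly.
 */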
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);