// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <[email protected]>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"

#define LINK_RETRAIN_TIMEOUT HZ

static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};

static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

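/*
 * Map a config space access to a CPU virtual address. The root port's own
 * config space is accessed through pcie->reg_base; accesses to any other
 * bus go through outbound AXI region 0, which is retargeted on every call
 * by rewriting its PCI address and descriptor registers below.
 */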
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}

	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;

	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == bridge->busnr + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}

static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

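/*
 * Poll the Link Training bit in the root port's Link Status register until
 * the hardware clears it, i.e. until link (re)training has finished.
 * Gives up after LINK_RETRAIN_TIMEOUT.
 */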
static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
{
	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	unsigned long end_jiffies;
	u16 lnk_stat;

	/* Wait for link training to complete. Exit after timeout. */
	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
	do {
		lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
			break;
		usleep_range(0, 1000);
	} while (time_before(jiffies, end_jiffies));

	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
		return 0;

	return -ETIMEDOUT;
}

static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (cdns_pcie_link_up(pcie)) {
			dev_info(dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

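/*
 * Work around a Gen2 training defect: if the root port advertises a speed
 * above 2.5 GT/s but the link came up at 2.5 GT/s, set the Retrain Link
 * bit, then wait for retraining to complete and the link to come back up.
 */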
static int cdns_pcie_retrain(struct cdns_pcie *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	/*
	 * Set the Retrain Link bit if the current speed is 2.5 GT/s,
	 * but the PCIe root port supports > 2.5 GT/s.
	 */
	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
					     PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		lnk_ctl = cdns_pcie_rp_readw(pcie,
					     pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				    lnk_ctl);

		ret = cdns_pcie_host_training_complete(pcie);
		if (ret)
			return ret;

		ret = cdns_pcie_host_wait_for_link(pcie);
	}

	return ret;
}

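/*
 * Enable the PTM (Precision Time Measurement) responder in the Local
 * Management PTM control register. CDNS_PCIE_LM_TPM_CTRL_PTMRSEN keeps
 * the existing spelling of the register definition in pcie-cadence.h.
 */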
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
	u32 val;

	val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}

static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	int ret;

	ret = cdns_pcie_host_wait_for_link(pcie);

	/*
	 * Retrain link for Gen2 training defect
	 * if quirk flag is set.
	 */
	if (!ret && rc->quirk_retrain_flag)
		ret = cdns_pcie_retrain(pcie);

	return ret;
}

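/*
 * Program the root port's config space header: the root complex BAR
 * configuration, optional vendor/device IDs taken from DT, and the
 * PCI-to-PCI bridge class code.
 */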
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}

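/*
 * Program one inbound (PCI -> CPU) address translation window through a
 * root port BAR and mark that BAR as used. The aperture is derived with
 * ilog2(), which rounds down for non-power-of-two sizes, so callers pass
 * power-of-two window sizes. RP_NO_BAR is a catch-all window that needs
 * no BAR configuration update.
 */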
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}

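/*
 * Best-fit helpers: cdns_pcie_host_find_min_bar() returns the smallest
 * available BAR that can still hold @size, while
 * cdns_pcie_host_find_max_bar() returns the largest available BAR no
 * bigger than @size (used to split oversized ranges).
 */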
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size <= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] < bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

static enum cdns_pcie_rp_bar
cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size >= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] > bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

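/*
 * Map one dma-ranges entry through inbound BARs. First try to fit the
 * whole remaining range into the smallest adequate BAR; if none is big
 * enough, carve off the largest BAR-sized chunk that fits and loop on the
 * remainder. Fails if the entry has a nonzero CPU/PCI offset or no free
 * BAR can take the remainder.
 */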
static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find a minimum BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less than
		 * the remaining resource_entry size.
		 * If a minimum BAR is found, IB ATU will be configured and
		 * exited.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If the control reaches here, it would mean the remaining
		 * resource_entry size cannot be fitted in a single BAR. So we
		 * find a maximum BAR whose size is less than or equal to the
		 * remaining resource_entry size and split the resource entry
		 * so that part of resource entry is fitted inside the maximum
		 * BAR. The remaining size would be fitted during the next
		 * iteration of the loop.
		 * If a maximum BAR is not found, there is no way we can fit
		 * this resource_entry, so we error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}

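/*
 * list_sort() comparator: order dma-ranges entries by decreasing size, so
 * the largest ranges get first pick of the largest inbound BARs. Only the
 * sign of the returned value matters to list_sort().
 */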
static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
					 const struct list_head *b)
{
	struct resource_entry *entry1, *entry2;

	entry1 = container_of(a, struct resource_entry, node);
	entry2 = container_of(b, struct resource_entry, node);

	return resource_size(entry2->res) - resource_size(entry1->res);
}

static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 no_bar_nbits = 32;
	int err;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	if (list_empty(&bridge->dma_ranges)) {
		of_property_read_u32(np, "cdns,no-bar-match-nbits",
				     &no_bar_nbits);
		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
						   (u64)1 << no_bar_nbits, 0);
		if (err)
			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
		return err;
	}

	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = cdns_pcie_host_bar_config(rc, entry);
		if (err) {
			dev_err(dev, "Fail to configure IB using dma-ranges\n");
			return err;
		}
	}

	return 0;
}

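/*
 * Set up outbound address translation: region 0 is reserved for config
 * accesses (its PCI address and descriptor registers are rewritten per
 * access by cdns_pci_map_bus()), then one region per bridge window is
 * programmed for MEM and I/O traffic. Inbound dma-ranges are mapped last.
 */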
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource *cfg_res = rc->cfg_res;
	struct resource_entry *entry;
	u64 cpu_addr = cfg_res->start;
	u32 addr0, addr1, desc1;
	int r, busnr = 0;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (entry)
		busnr = entry->res->start;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;
		u64 pci_addr = res->start - entry->offset;

		if (resource_type(res) == IORESOURCE_IO)
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      true,
						      pci_pio_to_address(res->start),
						      pci_addr,
						      resource_size(res));
		else
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      false,
						      res->start,
						      pci_addr,
						      resource_size(res));

		r++;
	}

	return cdns_pcie_host_map_dma_ranges(rc);
}

static int cdns_pcie_host_init(struct device *dev,
			       struct cdns_pcie_rc *rc)
{
	int err;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		return err;

	return cdns_pcie_host_init_address_translation(rc);
}

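/*
 * Main host-side setup path: read optional vendor/device IDs from DT, map
 * the "reg" and "cfg" regions, bring up and (if the quirk is set) retrain
 * the link, mark all inbound BARs available, program the root port and
 * address translation, then hand the bridge to the PCI core via
 * pci_host_probe().
 */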
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	struct device *dev = rc->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	enum cdns_pcie_rp_bar bar;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	rc->vendor_id = 0xffff;
	of_property_read_u32(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u32(np, "device-id", &rc->device_id);

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base))
		return PTR_ERR(rc->cfg_base);
	rc->cfg_res = res;

	if (rc->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);

	cdns_pcie_host_enable_ptm_response(pcie);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_start_link(rc);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		rc->avail_ib_bar[bar] = true;

	ret = cdns_pcie_host_init(dev, rc);
	if (ret)
		return ret;

	if (!bridge->ops)
		bridge->ops = &cdns_pcie_host_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_init;

	return 0;

 err_init:
	pm_runtime_put_sync(dev);

	return ret;
}