pcie-designware-host.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <[email protected]>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
        irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
        .name = "PCI-MSI",
        .irq_ack = dw_msi_ack_irq,
        .irq_mask = dw_msi_mask_irq,
        .irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
        int i, pos;
        unsigned long val;
        u32 status, num_ctrls;
        irqreturn_t ret = IRQ_NONE;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        for (i = 0; i < num_ctrls; i++) {
                status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                                           (i * MSI_REG_CTRL_BLOCK_SIZE));
                if (!status)
                        continue;

                ret = IRQ_HANDLED;
                val = status;
                pos = 0;
                while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
                                            pos)) != MAX_MSI_IRQS_PER_CTRL) {
                        generic_handle_domain_irq(pp->irq_domain,
                                                  (i * MAX_MSI_IRQS_PER_CTRL) + pos);
                        pos++;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct dw_pcie_rp *pp;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        dw_handle_msi_irq(pp);

        chained_irq_exit(chip, desc);
}
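
/*
 * Compose the MSI message for a vector: the address is the MSI target
 * (pp->msi_data) allocated in dw_pcie_msi_host_init(), and the data is
 * the hardware IRQ number within the controller's vector space.
 */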
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target;

        msi_target = (u64)pp->msi_data;

        msg->address_lo = lower_32_bits(msi_target);
        msg->address_hi = upper_32_bits(msi_target);

        msg->data = d->hwirq;

        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
                                   const struct cpumask *mask, bool force)
{
        return -EINVAL;
}
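
/*
 * The "bottom" irq_chip callbacks act directly on the controller's MSI
 * registers. Each MSI controller block handles MAX_MSI_IRQS_PER_CTRL
 * vectors, so the hwirq is split into a register block offset and a bit
 * position within that block.
 */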
static void dw_pci_bottom_mask(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] |= BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        pp->irq_mask[ctrl] &= ~BIT(bit);
        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
        struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        unsigned int res, bit, ctrl;

        ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
        res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
        bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
        .name = "DWPCI-MSI",
        .irq_ack = dw_pci_bottom_ack,
        .irq_compose_msi_msg = dw_pci_setup_msi_msg,
        .irq_set_affinity = dw_pci_msi_set_affinity,
        .irq_mask = dw_pci_bottom_mask,
        .irq_unmask = dw_pci_bottom_unmask,
};
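
/*
 * Allocate a contiguous, power-of-two aligned block of vectors from the
 * msi_irq_in_use bitmap so that Multi-MSI allocations stay contiguous.
 */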
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs,
                                    void *args)
{
        struct dw_pcie_rp *pp = domain->host_data;
        unsigned long flags;
        u32 i;
        int bit;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                                      order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);

        if (bit < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, bit + i,
                                    pp->msi_irq_chip,
                                    pp, handle_edge_irq,
                                    NULL, NULL);

        return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct dw_pcie_rp *pp = domain->host_data;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
                              order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
        .alloc = dw_pcie_irq_domain_alloc,
        .free = dw_pcie_irq_domain_free,
};
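
/*
 * Create the two-level MSI hierarchy: a linear parent domain covering
 * pp->num_vectors hwirqs, with a PCI MSI domain stacked on top of it.
 */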
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

        pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                                                  &dw_pcie_msi_domain_ops, pp);
        if (!pp->irq_domain) {
                dev_err(pci->dev, "Failed to create IRQ domain\n");
                return -ENOMEM;
        }

        irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

        pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                   &dw_pcie_msi_domain_info,
                                                   pp->irq_domain);
        if (!pp->msi_domain) {
                dev_err(pci->dev, "Failed to create MSI domain\n");
                irq_domain_remove(pp->irq_domain);
                return -ENOMEM;
        }

        return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
        u32 ctrl;

        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
                if (pp->msi_irq[ctrl] > 0)
                        irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                                                         NULL, NULL);
        }

        irq_domain_remove(pp->msi_domain);
        irq_domain_remove(pp->irq_domain);
}
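
/*
 * Program the MSI target address (pp->msi_data) into the controller's
 * PCIE_MSI_ADDR_LO/HI registers so inbound writes to it are raised as
 * MSI interrupts.
 */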
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target = (u64)pp->msi_data;

        if (!pci_msi_enabled() || !pp->has_msi_ctrl)
                return;

        /* Program the msi_data */
        dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
        dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
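
/*
 * Some platforms provide one interrupt line per MSI controller, named
 * "msi0".."msiN" in the devicetree, instead of a single shared "msi" IRQ.
 * Parse those here; -ENXIO tells the caller to fall back to "msi".
 */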
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct platform_device *pdev = to_platform_device(dev);
        u32 ctrl, max_vectors;
        int irq;

        /* Parse any "msiX" IRQs described in the devicetree */
        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
                char msi_name[] = "msiX";

                msi_name[3] = '0' + ctrl;
                irq = platform_get_irq_byname_optional(pdev, msi_name);
                if (irq == -ENXIO)
                        break;
                if (irq < 0)
                        return dev_err_probe(dev, irq,
                                             "Failed to parse MSI IRQ '%s'\n",
                                             msi_name);

                pp->msi_irq[ctrl] = irq;
        }

        /* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
        if (ctrl == 0)
                return -ENXIO;

        max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
        if (pp->num_vectors > max_vectors) {
                dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
                         max_vectors);
                pp->num_vectors = max_vectors;
        }
        if (!pp->num_vectors)
                pp->num_vectors = max_vectors;

        return 0;
}
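
/*
 * Set up the internal MSI controller: discover the MSI IRQ line(s),
 * create the IRQ domains, install the chained handler(s) and allocate
 * a coherent buffer whose DMA address serves as the MSI target.
 */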
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct platform_device *pdev = to_platform_device(dev);
        u64 *msi_vaddr;
        int ret;
        u32 ctrl, num_ctrls;

        for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
                pp->irq_mask[ctrl] = ~0;

        if (!pp->msi_irq[0]) {
                ret = dw_pcie_parse_split_msi_irq(pp);
                if (ret < 0 && ret != -ENXIO)
                        return ret;
        }

        if (!pp->num_vectors)
                pp->num_vectors = MSI_DEF_NUM_VECTORS;
        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        if (!pp->msi_irq[0]) {
                pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
                if (pp->msi_irq[0] < 0) {
                        pp->msi_irq[0] = platform_get_irq(pdev, 0);
                        if (pp->msi_irq[0] < 0)
                                return pp->msi_irq[0];
                }
        }

        dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

        pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

        ret = dw_pcie_allocate_domains(pp);
        if (ret)
                return ret;

        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
                if (pp->msi_irq[ctrl] > 0)
                        irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                                                         dw_chained_msi_isr, pp);
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

        msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
                                        GFP_KERNEL);
        if (!msi_vaddr) {
                dev_err(dev, "Failed to alloc and map MSI data\n");
                dw_pcie_free_msi(pp);
                return -ENOMEM;
        }

        return 0;
}
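
/*
 * Initialize the host bridge: map the "config" and "dbi" resources, set
 * up MSI handling, program the root complex, bring up the link and
 * enumerate the bus.
 */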
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource_entry *win;
        struct pci_host_bridge *bridge;
        struct resource *res;
        int ret;

        raw_spin_lock_init(&pp->lock);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
        if (res) {
                pp->cfg0_size = resource_size(res);
                pp->cfg0_base = res->start;

                pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
                if (IS_ERR(pp->va_cfg0_base))
                        return PTR_ERR(pp->va_cfg0_base);
        } else {
                dev_err(dev, "Missing *config* reg space\n");
                return -ENODEV;
        }

        if (!pci->dbi_base) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
                pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
                if (IS_ERR(pci->dbi_base))
                        return PTR_ERR(pci->dbi_base);
        }

        bridge = devm_pci_alloc_host_bridge(dev, 0);
        if (!bridge)
                return -ENOMEM;

        pp->bridge = bridge;

        /* Get the I/O range from DT */
        win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
        if (win) {
                pp->io_size = resource_size(win->res);
                pp->io_bus_addr = win->res->start - win->offset;
                pp->io_base = pci_pio_to_address(win->res->start);
        }

        if (pci->link_gen < 1)
                pci->link_gen = of_pci_get_max_link_speed(np);

        /* Set default bus ops */
        bridge->ops = &dw_pcie_ops;
        bridge->child_ops = &dw_child_pcie_ops;

        if (pp->ops->host_init) {
                ret = pp->ops->host_init(pp);
                if (ret)
                        return ret;
        }

        if (pci_msi_enabled()) {
                pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
                                     of_property_read_bool(np, "msi-parent") ||
                                     of_property_read_bool(np, "msi-map"));

                /*
                 * For the has_msi_ctrl case the default assignment is handled
                 * in the dw_pcie_msi_host_init().
                 */
                if (!pp->has_msi_ctrl && !pp->num_vectors) {
                        pp->num_vectors = MSI_DEF_NUM_VECTORS;
                } else if (pp->num_vectors > MAX_MSI_IRQS) {
                        dev_err(dev, "Invalid number of vectors\n");
                        ret = -EINVAL;
                        goto err_deinit_host;
                }

                if (pp->ops->msi_host_init) {
                        ret = pp->ops->msi_host_init(pp);
                        if (ret < 0)
                                goto err_deinit_host;
                } else if (pp->has_msi_ctrl) {
                        ret = dw_pcie_msi_host_init(pp);
                        if (ret < 0)
                                goto err_deinit_host;
                }
        }

        dw_pcie_version_detect(pci);

        dw_pcie_iatu_detect(pci);

        ret = dw_pcie_setup_rc(pp);
        if (ret)
                goto err_free_msi;

        if (!dw_pcie_link_up(pci)) {
                ret = dw_pcie_start_link(pci);
                if (ret)
                        goto err_free_msi;

                if (pci->ops && pci->ops->start_link) {
                        /* Ignore errors, the link may come up later */
                        dw_pcie_wait_for_link(pci);
                }
        }

        bridge->sysdata = pp;

        ret = pci_host_probe(bridge);
        if (ret)
                goto err_stop_link;

        return 0;

err_stop_link:
        dw_pcie_stop_link(pci);

err_free_msi:
        if (pp->has_msi_ctrl)
                dw_pcie_free_msi(pp);

err_deinit_host:
        if (pp->ops->host_deinit)
                pp->ops->host_deinit(pp);

        return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        pci_stop_root_bus(pp->bridge->bus);
        pci_remove_root_bus(pp->bridge->bus);

        dw_pcie_stop_link(pci);

        if (pp->has_msi_ctrl)
                dw_pcie_free_msi(pp);

        if (pp->ops->host_deinit)
                pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
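
/*
 * Config accesses to devices below the root port are routed through the
 * first outbound iATU window, programmed here as a CFG0 (Type 0, direct
 * child) or CFG1 (Type 1, behind a further bridge) region for the target
 * bus/device/function.
 */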
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
                                                unsigned int devfn, int where)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int type, ret;
        u32 busdev;

        /*
         * Checking whether the link is up here is a last line of defense
         * against platforms that forward errors on the system bus as
         * SError upon PCI configuration transactions issued when the link
         * is down. This check is racy by definition and does not stop
         * the system from triggering an SError if the link goes down
         * after this check is performed.
         */
        if (!dw_pcie_link_up(pci))
                return NULL;

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (pci_is_root_bus(bus->parent))
                type = PCIE_ATU_TYPE_CFG0;
        else
                type = PCIE_ATU_TYPE_CFG1;

        ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
                                        pp->cfg0_size);
        if (ret)
                return NULL;

        return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 *val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret;

        ret = pci_generic_config_read(bus, devfn, where, size, val);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        if (pp->cfg0_io_shared) {
                ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                                                pp->io_base, pp->io_bus_addr,
                                                pp->io_size);
                if (ret)
                        return PCIBIOS_SET_FAILED;
        }

        return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        int ret;

        ret = pci_generic_config_write(bus, devfn, where, size, val);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;

        if (pp->cfg0_io_shared) {
                ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                                                pp->io_base, pp->io_bus_addr,
                                                pp->io_size);
                if (ret)
                        return PCIBIOS_SET_FAILED;
        }

        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
        .map_bus = dw_pcie_other_conf_map_bus,
        .read = dw_pcie_rd_other_conf,
        .write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (PCI_SLOT(devfn) > 0)
                return NULL;

        return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
        .map_bus = dw_pcie_own_conf_map_bus,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
};
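
/*
 * Program the outbound iATU windows for the bridge MEM and IO ranges.
 * Window 0 is reserved for config accesses; if the controller runs out
 * of windows, the IO range shares window 0 with config (cfg0_io_shared).
 */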
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct resource_entry *entry;
        int i, ret;

        /* Note the very first outbound ATU is used for CFG IOs */
        if (!pci->num_ob_windows) {
                dev_err(pci->dev, "No outbound iATU found\n");
                return -EINVAL;
        }

        /*
         * Ensure all outbound windows are disabled before proceeding with
         * the MEM/IO ranges setups.
         */
        for (i = 0; i < pci->num_ob_windows; i++)
                dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

        i = 0;
        resource_list_for_each_entry(entry, &pp->bridge->windows) {
                if (resource_type(entry->res) != IORESOURCE_MEM)
                        continue;

                if (pci->num_ob_windows <= ++i)
                        break;

                ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
                                                entry->res->start,
                                                entry->res->start - entry->offset,
                                                resource_size(entry->res));
                if (ret) {
                        dev_err(pci->dev, "Failed to set MEM range %pr\n",
                                entry->res);
                        return ret;
                }
        }

        if (pp->io_size) {
                if (pci->num_ob_windows > ++i) {
                        ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
                                                        pp->io_base,
                                                        pp->io_bus_addr,
                                                        pp->io_size);
                        if (ret) {
                                dev_err(pci->dev, "Failed to set IO range %pr\n",
                                        entry->res);
                                return ret;
                        }
                } else {
                        pp->cfg0_io_shared = true;
                }
        }

        if (pci->num_ob_windows <= i)
                dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
                         pci->num_ob_windows);

        return 0;
}
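
/*
 * Program the root complex registers: MSI controller state, RC BARs,
 * interrupt pin, bus numbers, command register and, unless the platform
 * supplies its own child config accessors, the iATU windows.
 */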
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u32 val, ctrl, num_ctrls;
        int ret;

        /*
         * Enable DBI read-only registers for writing/updating configuration.
         * Write permission gets disabled towards the end of this function.
         */
        dw_pcie_dbi_ro_wr_en(pci);

        dw_pcie_setup(pci);

        if (pp->has_msi_ctrl) {
                num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

                /* Initialize IRQ Status array */
                for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
                        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                                           (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                           pp->irq_mask[ctrl]);
                        dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                                           (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                                           ~0);
                }
        }

        dw_pcie_msi_init(pp);

        /* Setup RC BARs */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

        /* Setup interrupt pins */
        val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
        val &= 0xffff00ff;
        val |= 0x00000100;
        dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

        /* Setup bus numbers */
        val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
        val &= 0xff000000;
        val |= 0x00ff0100;
        dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

        /* Setup command register */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
               PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        /*
         * If the platform provides its own child bus config accesses, it means
         * the platform uses its own address translation component rather than
         * ATU, so we should not program the ATU here.
         */
        if (pp->bridge->child_ops == &dw_child_pcie_ops) {
                ret = dw_pcie_iatu_setup(pp);
                if (ret)
                        return ret;
        }

        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

        /* Program correct class for RC */
        dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

        dw_pcie_dbi_ro_wr_dis(pci);

        return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);