pcie-xilinx-cpm.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pci-ecam.h>

#include "../pci.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR          0x00000E10
#define XILINX_CPM_PCIE_REG_IMR          0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR         0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC         0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR        0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN         0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK    0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS   0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE   0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL    BIT(1)

#define XILINX_CPM_PCIE_IR_STATUS        0x000002A0
#define XILINX_CPM_PCIE_IR_ENABLE        0x000002A8
#define XILINX_CPM_PCIE_IR_LOCAL         BIT(0)

/* Interrupt registers definitions */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN         0
#define XILINX_CPM_PCIE_INTR_HOT_RESET         3
#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT  4
#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT       8
#define XILINX_CPM_PCIE_INTR_CORRECTABLE       9
#define XILINX_CPM_PCIE_INTR_NONFATAL          10
#define XILINX_CPM_PCIE_INTR_FATAL             11
#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON    12
#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD   15
#define XILINX_CPM_PCIE_INTR_INTX              16
#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD       17
#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP        20
#define XILINX_CPM_PCIE_INTR_SLV_UNEXP         21
#define XILINX_CPM_PCIE_INTR_SLV_COMPL         22
#define XILINX_CPM_PCIE_INTR_SLV_ERRP          23
#define XILINX_CPM_PCIE_INTR_SLV_CMPABT        24
#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR        25
#define XILINX_CPM_PCIE_INTR_MST_DECERR        26
#define XILINX_CPM_PCIE_INTR_MST_SLVERR        27
#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT  28

#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK \
        ( \
                IMR(LINK_DOWN) | \
                IMR(HOT_RESET) | \
                IMR(CFG_PCIE_TIMEOUT) | \
                IMR(CFG_TIMEOUT) | \
                IMR(CORRECTABLE) | \
                IMR(NONFATAL) | \
                IMR(FATAL) | \
                IMR(CFG_ERR_POISON) | \
                IMR(PME_TO_ACK_RCVD) | \
                IMR(INTX) | \
                IMR(PM_PME_RCVD) | \
                IMR(SLV_UNSUPP) | \
                IMR(SLV_UNEXP) | \
                IMR(SLV_COMPL) | \
                IMR(SLV_ERRP) | \
                IMR(SLV_CMPABT) | \
                IMR(SLV_ILLBUR) | \
                IMR(MST_DECERR) | \
                IMR(MST_SLVERR) | \
                IMR(SLV_PCIE_TIMEOUT) \
        )

#define XILINX_CPM_PCIE_IDR_ALL_MASK     0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK        GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT       16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID  BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID     GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK   0xFFFFFFFF

/* Root Port Status/control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN     BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP   BIT(11)

enum xilinx_cpm_version {
        CPM,
        CPM5,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 */
struct xilinx_cpm_variant {
        enum xilinx_cpm_version version;
};

/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: legacy interrupt number
 * @irq: Error interrupt number
 * @lock: lock protecting shared register access
 * @variant: CPM version check pointer
 */
struct xilinx_cpm_pcie {
        struct device *dev;
        void __iomem *reg_base;
        void __iomem *cpm_base;
        struct irq_domain *intx_domain;
        struct irq_domain *cpm_domain;
        struct pci_config_window *cfg;
        int intx_irq;
        int irq;
        raw_spinlock_t lock;
        const struct xilinx_cpm_variant *variant;
};

static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
        return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie *port,
                       u32 val, u32 reg)
{
        writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
        return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
                XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
        unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

        if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
                dev_dbg(port->dev, "Requester ID %lu\n",
                        val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
                pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
                           XILINX_CPM_PCIE_REG_RPEFR);
        }
}
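
/*
 * INTA..INTD status and mask bits live in bits 16..19 of the IDRN and
 * IDRN_MASK registers (XILINX_CPM_PCIE_IDRN_MASK), so the INTx hwirq is
 * offset by XILINX_CPM_PCIE_IDRN_SHIFT when building the per-line mask bit.
 */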
static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
        struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

        mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
        raw_spin_lock_irqsave(&port->lock, flags);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
        pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
        raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
        struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

        mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
        raw_spin_lock_irqsave(&port->lock, flags);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
        pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
        raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
        .name = "INTx",
        .irq_mask = xilinx_cpm_mask_leg_irq,
        .irq_unmask = xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
                                    unsigned int irq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        irq_set_status_flags(irq, IRQ_LEVEL);

        return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
        .map = xilinx_cpm_pcie_intx_map,
};
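
/*
 * Chained handler for the INTX event: read the pending INTA..INTD bits
 * from IDRN and forward each one to the INTx IRQ domain.
 */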
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
        struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long val;
        int i;

        chained_irq_enter(chip, desc);

        val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
                        pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

        for_each_set_bit(i, &val, PCI_NUM_INTX)
                generic_handle_domain_irq(port->intx_domain, i);

        chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
        struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
        u32 val;

        raw_spin_lock(&port->lock);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        val &= ~BIT(d->hwirq);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
        raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
        struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
        u32 val;

        raw_spin_lock(&port->lock);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        val |= BIT(d->hwirq);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
        raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
        .name = "RC-Event",
        .irq_mask = xilinx_cpm_mask_event_irq,
        .irq_unmask = xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
                                     unsigned int irq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        irq_set_status_flags(irq, IRQ_LEVEL);

        return 0;
}

static const struct irq_domain_ops event_domain_ops = {
        .map = xilinx_cpm_pcie_event_map,
};
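
/*
 * Chained handler for the main error/event interrupt: dispatch every
 * pending, enabled bit in IDR to the event IRQ domain, acknowledge those
 * bits by writing them back to IDR, then clear the CPM5-only IR_STATUS
 * and the SLCR-level MISC_IR_STATUS registers by writing the pending
 * bits back.
 */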
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
        struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long val;
        int i;

        chained_irq_enter(chip, desc);
        val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
        val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
        for_each_set_bit(i, &val, 32)
                generic_handle_domain_irq(port->cpm_domain, i);
        pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

        if (port->variant->version == CPM5) {
                val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
                if (val)
                        writel_relaxed(val, port->cpm_base +
                                            XILINX_CPM_PCIE_IR_STATUS);
        }

        /*
         * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
         * CPM SLCR block.
         */
        val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
        if (val)
                writel_relaxed(val,
                               port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

        chained_irq_exit(chip, desc);
}

#define _IC(x, s) \
        [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
        const char *sym;
        const char *str;
} intr_cause[32] = {
        _IC(LINK_DOWN,          "Link Down"),
        _IC(HOT_RESET,          "Hot reset"),
        _IC(CFG_TIMEOUT,        "ECAM access timeout"),
        _IC(CORRECTABLE,        "Correctable error message"),
        _IC(NONFATAL,           "Non fatal error message"),
        _IC(FATAL,              "Fatal error message"),
        _IC(SLV_UNSUPP,         "Slave unsupported request"),
        _IC(SLV_UNEXP,          "Slave unexpected completion"),
        _IC(SLV_COMPL,          "Slave completion timeout"),
        _IC(SLV_ERRP,           "Slave Error Poison"),
        _IC(SLV_CMPABT,         "Slave Completer Abort"),
        _IC(SLV_ILLBUR,         "Slave Illegal Burst"),
        _IC(MST_DECERR,         "Master decode error"),
        _IC(MST_SLVERR,         "Master slave error"),
        _IC(CFG_PCIE_TIMEOUT,   "PCIe ECAM access timeout"),
        _IC(CFG_ERR_POISON,     "ECAM poisoned completion received"),
        _IC(PME_TO_ACK_RCVD,    "PME_TO_ACK message received"),
        _IC(PM_PME_RCVD,        "PM_PME message received"),
        _IC(SLV_PCIE_TIMEOUT,   "PCIe completion timeout received"),
};
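
/*
 * Handler requested for each named event in intr_cause: error-message
 * events drain the Root Port Error FIFO first, then every event is logged
 * by its description; events without a description fall back to an
 * "Unknown IRQ" warning.
 */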
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
        struct xilinx_cpm_pcie *port = dev_id;
        struct device *dev = port->dev;
        struct irq_data *d;

        d = irq_domain_get_irq_data(port->cpm_domain, irq);

        switch (d->hwirq) {
        case XILINX_CPM_PCIE_INTR_CORRECTABLE:
        case XILINX_CPM_PCIE_INTR_NONFATAL:
        case XILINX_CPM_PCIE_INTR_FATAL:
                cpm_pcie_clear_err_interrupts(port);
                fallthrough;

        default:
                if (intr_cause[d->hwirq].str)
                        dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
                else
                        dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
        }

        return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
        if (port->intx_domain) {
                irq_domain_remove(port->intx_domain);
                port->intx_domain = NULL;
        }

        if (port->cpm_domain) {
                irq_domain_remove(port->cpm_domain);
                port->cpm_domain = NULL;
        }
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
        struct device *dev = port->dev;
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node;

        /* Setup INTx */
        pcie_intc_node = of_get_next_child(node, NULL);
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
                return -EINVAL;
        }

        port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
                                                 &event_domain_ops,
                                                 port);
        if (!port->cpm_domain)
                goto out;

        irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

        port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                  &intx_domain_ops,
                                                  port);
        if (!port->intx_domain)
                goto out;

        irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

        of_node_put(pcie_intc_node);
        raw_spin_lock_init(&port->lock);

        return 0;
out:
        xilinx_cpm_free_irq_domains(port);
        of_node_put(pcie_intc_node);
        dev_err(dev, "Failed to allocate IRQ domains\n");

        return -ENOMEM;
}
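
/*
 * Request a dedicated IRQ for every named event bit, then install the two
 * chained handlers: one for the INTX summary bit and one for the top-level
 * error/event interrupt itself.
 */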
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
        struct device *dev = port->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int i, irq;

        port->irq = platform_get_irq(pdev, 0);
        if (port->irq < 0)
                return port->irq;

        for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
                int err;

                if (!intr_cause[i].str)
                        continue;

                irq = irq_create_mapping(port->cpm_domain, i);
                if (!irq) {
                        dev_err(dev, "Failed to map interrupt\n");
                        return -ENXIO;
                }

                err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
                                       0, intr_cause[i].sym, port);
                if (err) {
                        dev_err(dev, "Failed to request IRQ %d\n", irq);
                        return err;
                }
        }

        port->intx_irq = irq_create_mapping(port->cpm_domain,
                                            XILINX_CPM_PCIE_INTR_INTX);
        if (!port->intx_irq) {
                dev_err(dev, "Failed to map INTx interrupt\n");
                return -ENXIO;
        }

        /* Plug the INTx chained handler */
        irq_set_chained_handler_and_data(port->intx_irq,
                                         xilinx_cpm_pcie_intx_flow, port);

        /* Plug the main event chained handler */
        irq_set_chained_handler_and_data(port->irq,
                                         xilinx_cpm_pcie_event_flow, port);

        return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
        if (cpm_pcie_link_up(port))
                dev_info(port->dev, "PCIe Link is UP\n");
        else
                dev_info(port->dev, "PCIe Link is DOWN\n");

        /* Disable all interrupts */
        pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
                   XILINX_CPM_PCIE_REG_IMR);

        /* Clear pending interrupts */
        pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
                   XILINX_CPM_PCIE_IMR_ALL_MASK,
                   XILINX_CPM_PCIE_REG_IDR);

        /*
         * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
         * CPM SLCR block.
         */
        writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
               port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

        if (port->variant->version == CPM5) {
                writel(XILINX_CPM_PCIE_IR_LOCAL,
                       port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
        }

        /* Enable the Bridge enable bit */
        pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
                   XILINX_CPM_PCIE_REG_RPSC_BEN,
                   XILINX_CPM_PCIE_REG_RPSC);
}
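
/*
 * CPM5 exposes the bridge registers in a separate "cpm_csr" region, while
 * the original CPM variant reaches them through the mapped ECAM window
 * (port->cfg->win), so reg_base is resolved per variant below.
 */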
/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
                                    struct resource *bus_range)
{
        struct device *dev = port->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *res;

        port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
                                                                "cpm_slcr");
        if (IS_ERR(port->cpm_base))
                return PTR_ERR(port->cpm_base);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
        if (!res)
                return -ENXIO;

        port->cfg = pci_ecam_create(dev, res, bus_range,
                                    &pci_generic_ecam_ops);
        if (IS_ERR(port->cfg))
                return PTR_ERR(port->cfg);

        if (port->variant->version == CPM5) {
                port->reg_base = devm_platform_ioremap_resource_byname(pdev,
                                                                       "cpm_csr");
                if (IS_ERR(port->reg_base))
                        return PTR_ERR(port->reg_base);
        } else {
                port->reg_base = port->cfg->win;
        }

        return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
        irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
        irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}
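
/*
 * The probe error path unwinds in reverse order of setup: the chained
 * handlers first, then the ECAM mapping, and finally the IRQ domains.
 */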
/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
        struct xilinx_cpm_pcie *port;
        struct device *dev = &pdev->dev;
        struct pci_host_bridge *bridge;
        struct resource_entry *bus;
        int err;

        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
        if (!bridge)
                return -ENODEV;

        port = pci_host_bridge_priv(bridge);

        port->dev = dev;

        err = xilinx_cpm_pcie_init_irq_domain(port);
        if (err)
                return err;

        bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
        if (!bus)
                return -ENODEV;

        port->variant = of_device_get_match_data(dev);

        err = xilinx_cpm_pcie_parse_dt(port, bus->res);
        if (err) {
                dev_err(dev, "Parsing DT failed\n");
                goto err_parse_dt;
        }

        xilinx_cpm_pcie_init_port(port);

        err = xilinx_cpm_setup_irq(port);
        if (err) {
                dev_err(dev, "Failed to set up interrupts\n");
                goto err_setup_irq;
        }

        bridge->sysdata = port->cfg;
        bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

        err = pci_host_probe(bridge);
        if (err < 0)
                goto err_host_bridge;

        return 0;

err_host_bridge:
        xilinx_cpm_free_interrupts(port);
err_setup_irq:
        pci_ecam_free(port->cfg);
err_parse_dt:
        xilinx_cpm_free_irq_domains(port);
        return err;
}

static const struct xilinx_cpm_variant cpm_host = {
        .version = CPM,
};

static const struct xilinx_cpm_variant cpm5_host = {
        .version = CPM5,
};

static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
        {
                .compatible = "xlnx,versal-cpm-host-1.00",
                .data = &cpm_host,
        },
        {
                .compatible = "xlnx,versal-cpm5-host",
                .data = &cpm5_host,
        },
        {}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
        .driver = {
                .name = "xilinx-cpm-pcie",
                .of_match_table = xilinx_cpm_pcie_of_match,
                .suppress_bind_attrs = true,
        },
        .probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);