pcie-rockchip-ep.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip AXI PCIe endpoint controller driver
 *
 * Copyright (c) 2018 Rockchip, Inc.
 *
 * Author: Shawn Lin <[email protected]>
 *         Simon Xue <[email protected]>
 */

#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>

#include "pcie-rockchip.h"

/**
 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 * @rockchip: Rockchip PCIe controller
 * @epc: PCI EPC device
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 */
struct rockchip_pcie_ep {
	struct rockchip_pcie	rockchip;
	struct pci_epc		*epc;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
};

static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
					  u32 region)
{
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}

static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
					 u32 r, u64 cpu_addr, u64 pci_addr,
					 size_t size)
{
	int num_pass_bits = fls64(size - 1);
	u32 addr0, addr1, desc0;

	if (num_pass_bits < 8)
		num_pass_bits = 8;

	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
	addr1 = upper_32_bits(pci_addr);
	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;

	/* PCI bus address region */
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
	rockchip_pcie_write(rockchip, desc0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
}

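/*
 * Editorial note (not from the original source): the low bits of ADDR0
 * program the number of address bits passed through the translation, so the
 * effective window size is 2^num_pass_bits. Clamping num_pass_bits to a
 * minimum of 8 therefore appears to enforce a 256-byte minimum mapping
 * granularity, which matches the .align = 256 advertised in
 * rockchip_pcie_epc_features further below.
 */
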
static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
					 struct pci_epf_header *hdr)
{
	u32 reg;
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;

	/* All functions share the same vendor ID with function 0 */
	if (fn == 0) {
		u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
			       (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;

		rockchip_pcie_write(rockchip, vid_regs,
				    PCIE_CORE_CONFIG_VENDOR);
	}

	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);

	rockchip_pcie_write(rockchip,
			    hdr->revid |
			    hdr->progif_code << 8 |
			    hdr->subclass_code << 16 |
			    hdr->baseclass_code << 24,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
	rockchip_pcie_write(rockchip, hdr->cache_line_size,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_CACHE_LINE_SIZE);
	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_SUBSYSTEM_VENDOR_ID);
	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_INTERRUPT_LINE);

	return 0;
}

static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				    struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);

	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
	}

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));

	return 0;
}

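/*
 * Editorial note (not from the original source): a 64-bit memory BAR
 * occupies two consecutive BAR slots, so it can only start at an even BAR
 * index; that is why the function above rejects a 64-bit request on an odd
 * BAR. The 64-bit type itself appears to be inferred purely from the size
 * (sz > SZ_2G), since 2 GiB is the largest aperture a 32-bit BAR can decode.
 */
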
static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				       struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 reg, cfg, b, ctrl;
	enum pci_barno bar = epf_bar->barno;

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}

static inline u32 rockchip_ob_region(phys_addr_t addr)
{
	return (addr >> ilog2(SZ_1M)) & 0x1f;
}

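/*
 * Editorial note (not from the original source): the outbound address space
 * is carved into 1 MiB windows (see the pci_epc_mem_window setup in the
 * probe routine below), so the region index is simply the 1 MiB-aligned
 * offset of @addr within the outbound area, masked with 0x1f, i.e. at most
 * 32 regions.
 */
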
static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				     phys_addr_t addr, u64 pci_addr,
				     size_t size)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *pcie = &ep->rockchip;
	u32 r = rockchip_ob_region(addr);

	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
					phys_addr_t addr)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 r;

	for (r = 0; r < ep->max_regions; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions)
		return;

	rockchip_pcie_clear_ep_ob_atu(rockchip, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				    u8 multi_msg_cap)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
	flags |= (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
		 (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
	rockchip_pcie_write(rockchip, flags,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	return 0;
}

static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
		ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}

static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 intx, bool do_assert)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;

	intx &= 3;

	if (do_assert) {
		ep->irq_pending |= BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_ASSERT |
				    PCIE_CLIENT_INT_PEND_ST_PEND,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	} else {
		ep->irq_pending &= ~BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_DEASSERT |
				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	}
}

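/*
 * Editorial note (not from the original source): the client register write
 * above does not encode @fn or @intx; the assert/deassert acts on the single
 * legacy interrupt signal driven by the wrapper, and @intx is only tracked
 * in @irq_pending. Combined with raise_irq() below always passing intx 0,
 * this effectively means only INTA is ever signalled.
 */
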
static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
					    u8 intx)
{
	u16 cmd;

	cmd = rockchip_pcie_read(&ep->rockchip,
				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				 ROCKCHIP_PCIE_EP_CMD_STATUS);

	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	/*
	 * The TRM vaguely requires some delay between asserting and
	 * deasserting INTx, on the order of a few AHB bus clock cycles,
	 * so use a generous 1 ms here.
	 */
	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
	mdelay(1);
	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 interrupt_num)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr;
	u32 r;

	/* Check MSI enable bit */
	flags = rockchip_pcie_read(&ep->rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	/* Get MSI numbers from MME */
	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
	       ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Set MSI private data */
	data_mask = msi_count - 1;
	data = rockchip_pcie_read(rockchip,
				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				  PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get MSI PCI address */
	pci_addr = rockchip_pcie_read(rockchip,
				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				      PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= rockchip_pcie_read(rockchip,
				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				       PCI_MSI_ADDRESS_LO);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
		     ep->irq_pci_fn != fn)) {
		r = rockchip_ob_region(ep->irq_phys_addr);
		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
					     ep->irq_phys_addr,
					     pci_addr & PCIE_ADDR_MASK,
					     ~PCIE_ADDR_MASK + 1);
		ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
		ep->irq_pci_fn = fn;
	}

	writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
	return 0;
}

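/*
 * Editorial note (not from the original source): the MSI is raised by a
 * plain CPU write into the dedicated outbound window reserved in the probe
 * routine (irq_cpu_addr/irq_phys_addr). The window is (re)mapped, only when
 * needed, to the PCIE_ADDR_MASK-aligned part of the address read from the
 * function's MSI capability; the low address bits select the offset inside
 * the window, so the writew() above becomes a memory-write TLP to the exact
 * MSI target address carrying the per-vector data value.
 */
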
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				      enum pci_epc_irq_type type,
				      u16 interrupt_num)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
	case PCI_EPC_IRQ_MSI:
		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
	default:
		return -EINVAL;
	}
}

static int rockchip_pcie_ep_start(struct pci_epc *epc)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct pci_epf *epf;
	u32 cfg;

	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);

	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);

	return 0;
}

static const struct pci_epc_features rockchip_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.align = 256,
};

static const struct pci_epc_features*
rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rockchip_pcie_epc_features;
}

static const struct pci_epc_ops rockchip_pcie_epc_ops = {
	.write_header	= rockchip_pcie_ep_write_header,
	.set_bar	= rockchip_pcie_ep_set_bar,
	.clear_bar	= rockchip_pcie_ep_clear_bar,
	.map_addr	= rockchip_pcie_ep_map_addr,
	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
	.set_msi	= rockchip_pcie_ep_set_msi,
	.get_msi	= rockchip_pcie_ep_get_msi,
	.raise_irq	= rockchip_pcie_ep_raise_irq,
	.start		= rockchip_pcie_ep_start,
	.get_features	= rockchip_pcie_ep_get_features,
};

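/*
 * Editorial note (not from the original source): these ops are not called
 * directly; an endpoint function (EPF) driver reaches them through the
 * generic pci_epc_* API. A rough usage sketch (error handling omitted,
 * local names are illustrative only):
 *
 *	void __iomem *va;
 *	phys_addr_t pa;
 *
 *	va = pci_epc_mem_alloc_addr(epc, &pa, SZ_64K);
 *	pci_epc_map_addr(epc, fn, vfn, pa, host_pci_addr, SZ_64K);
 *	memcpy_toio(va, buf, len);              // CPU writes become PCIe TLPs
 *	pci_epc_unmap_addr(epc, fn, vfn, pa);
 *	pci_epc_raise_irq(epc, fn, vfn, PCI_EPC_IRQ_MSI, 1);
 */
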
static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
				     struct rockchip_pcie_ep *ep)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_get_phys(rockchip);
	if (err)
		return err;

	err = of_property_read_u32(dev->of_node,
				   "rockchip,max-outbound-regions",
				   &ep->max_regions);
	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
		ep->max_regions = MAX_REGION_LIMIT;

	ep->ob_region_map = 0;

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->epc->max_functions);
	if (err < 0)
		ep->epc->max_functions = 1;

	return 0;
}

static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie-ep"},
	{},
};

static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie_ep *ep;
	struct rockchip_pcie *rockchip;
	struct pci_epc *epc;
	size_t max_regions;
	struct pci_epc_mem_window *windows = NULL;
	int err, i;
	u32 cfg_msi, cfg_msix_cp;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	rockchip = &ep->rockchip;
	rockchip->is_rc = false;
	rockchip->dev = dev;

	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	err = rockchip_pcie_parse_ep_dt(rockchip, ep);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		goto err_disable_clocks;

	/* Establish the link automatically */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	max_regions = ep->max_regions;
	ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr) {
		err = -ENOMEM;
		goto err_uninit_port;
	}

	/* Only enable function 0 by default */
	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);

	windows = devm_kcalloc(dev, ep->max_regions,
			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
	if (!windows) {
		err = -ENOMEM;
		goto err_uninit_port;
	}

	for (i = 0; i < ep->max_regions; i++) {
		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
		windows[i].size = SZ_1M;
		windows[i].page_size = SZ_1M;
	}

	err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
	devm_kfree(dev, windows);

	if (err < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_uninit_port;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_1M);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		err = -ENOMEM;
		goto err_epc_mem_exit;
	}

	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;

	/*
	 * MSI-X is not supported but the controller still advertises the MSI-X
	 * capability by default, which can lead to the Root Complex side
	 * allocating MSI-X vectors which cannot be used. Avoid this by skipping
	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
	 * the next pointer from the MSI-X entry and set that in the MSI
	 * capability entry (which is the previous entry). This way the MSI-X
	 * entry is skipped (left out of the linked-list) and not advertised.
	 */
	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;

	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
		      ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;

	cfg_msi |= cfg_msix_cp;

	rockchip_pcie_write(rockchip, cfg_msi,
			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
			    PCIE_CLIENT_CONFIG);

	return 0;
err_epc_mem_exit:
	pci_epc_mem_exit(epc);
err_uninit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}

static struct platform_driver rockchip_pcie_ep_driver = {
	.driver = {
		.name = "rockchip-pcie-ep",
		.of_match_table = rockchip_pcie_ep_of_match,
	},
	.probe = rockchip_pcie_ep_probe,
};

builtin_platform_driver(rockchip_pcie_ep_driver);