// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <[email protected]>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

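/*
 * Translate a (physical function, virtual function) pair into the absolute
 * function index used when accessing the function's configuration registers.
 * vfn == 0 addresses the physical function itself; otherwise the SR-IOV
 * First VF Offset and VF Stride of the parent PF are applied.
 */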
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

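/*
 * Program the standard configuration space header of a (virtual) function.
 * For virtual functions only the Device ID of VF #1 is writable; the vendor
 * and subsystem vendor IDs are shared and can only be set through function 0.
 */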
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0; all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

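/*
 * Configure a BAR of a (virtual) function: program the aperture (a power of
 * two, at least 128 bytes) and control bits in the LM BAR configuration
 * register, then point the inbound address translation registers at the
 * memory backing this BAR.
 */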
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

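/* Disable a previously configured BAR and clear its inbound translation. */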
static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

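/*
 * Map a local CPU address to a PCI address by claiming a free outbound
 * region.  Region 0 is reserved for interrupt writes and is therefore never
 * handed out here.
 */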
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

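/*
 * Advertise the Multiple Message Capable field of the MSI capability so the
 * host knows how many MSI vectors this function may request.
 */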
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield of the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSIX BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset.  BAR must match MSIX BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

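/*
 * Assert or deassert an INTx line by posting the corresponding
 * Assert_INTx/Deassert_INTx message through the outbound region reserved for
 * interrupt writes, while keeping the Interrupt Status bit of PCI_STATUS in
 * sync with the pending INTx state.
 */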
static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
					u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

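/*
 * Raise an MSI by writing the message data for the requested vector to the
 * message address programmed by the host, through the outbound region
 * reserved for interrupt writes.
 */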
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to which the data will be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

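/*
 * Map the host-programmed MSI address into the endpoint's address space,
 * intended to let an endpoint-side initiator (for example a DMA engine)
 * raise MSIs with plain memory writes.  Returns the message data and the
 * offset of the MSI address within the mapped window.
 */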
static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address to which the data will be written. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

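/*
 * Raise an MSI-X interrupt: look up the message address and data in the
 * MSI-X table that the host programmed through the function's BAR, then post
 * the write through the outbound region reserved for interrupt writes.
 */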
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise legacy interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

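/*
 * Enable the endpoint functions that have been bound, hide the Function
 * Level Reset capability when the controller quirk requires it, and start
 * the PCIe link.
 */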
static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, value, epf;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.map_msi_irq	= cdns_pcie_ep_map_msi_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};

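/*
 * Probe-time setup of the endpoint controller: map the "reg" and "mem"
 * resources, allocate the outbound-region and per-function bookkeeping,
 * create the EPC device, initialize its address space, and reserve outbound
 * region 0 (with a 128K window) for interrupt writes.
 */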
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;

			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	return 0;

free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}