/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <[email protected]>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>

/* Parameters for waiting for the link to come up */
#define LINK_WAIT_MAX_RETRIES	10
#define LINK_WAIT_USLEEP_MIN	90000
#define LINK_WAIT_USLEEP_MAX	100000

/*
 * Local Management Registers
 */
#define CDNS_PCIE_LM_BASE	0x00100000

/* Vendor ID Register */
#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
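
/*
 * Usage sketch (illustrative only, not part of the driver): program the
 * vendor and subsystem vendor IDs advertised by the controller.  The 0x17cd
 * value below is just an example ID.
 *
 *	u32 id = CDNS_PCIE_LM_ID_VENDOR(0x17cd) |
 *		 CDNS_PCIE_LM_ID_SUBSYS(0x17cd);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
 */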

/* Root Port Requestor ID Register */
#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT	0
#define CDNS_PCIE_LM_RP_RID_(rid) \
	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8

/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : \
	 CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : \
	 CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
	(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
	(GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
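
/*
 * Usage sketch (illustrative only): update the aperture and control fields
 * for BAR "bar" of function "fn" with a read-modify-write of the
 * per-function BAR configuration register.  The "aperture" value and the
 * choice of a 64-bit memory BAR are assumptions made for the example.
 *
 *	u32 cfg = cdns_pcie_readl(pcie, CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn));
 *	u8 b = (bar < BAR_4) ? bar : bar - BAR_4;
 *
 *	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 *		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 *	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
 *	       CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn), cfg);
 */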

/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK	GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK	GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE	BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS	0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS	BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE	BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED	0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS	0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS	0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS	0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))
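
/*
 * Usage sketch (illustrative only): describe RP_BAR0 as a 64-bit
 * prefetchable memory BAR.  The aperture field encodes the window size;
 * using ilog2(size) here is an assumption for the example, see the host
 * driver for the authoritative computation.
 *
 *	u32 value = LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(RP_BAR0) |
 *		    LM_RC_BAR_CFG_APERTURE(RP_BAR0, ilog2(size));
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 */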

/* PTM Control Register */
#define CDNS_PCIE_LM_PTM_CTRL	(CDNS_PCIE_LM_BASE + 0x0da8)
#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN	BIT(17)

/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))

#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET	0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET	0x200
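
/*
 * Usage sketch (illustrative only): read the MSI Message Control word of
 * physical function "fn" through its per-function configuration space
 * window.  PCI_MSI_FLAGS is the standard capability offset from
 * <uapi/linux/pci_regs.h>.
 *
 *	u16 flags = cdns_pcie_ep_fn_readw(pcie, fn,
 *					  CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET +
 *					  PCI_MSI_FLAGS);
 */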

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK	GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM	0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO	0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
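
/*
 * Programming sketch for one outbound memory region (illustrative only;
 * cdns_pcie_set_outbound_region() declared at the end of this file is the
 * real helper).  "r", "nbits", "busnr", "pci_addr" and "cpu_addr" are
 * assumed caller-provided values:
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r),
 *			 CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
 *			 lower_32_bits(pci_addr));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r),
 *			 upper_32_bits(pci_addr));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r),
 *			 CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r),
 *			 CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r),
 *			 CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
 *			 lower_32_bits(cpu_addr));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r),
 *			 upper_32_bits(cpu_addr));
 */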

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP	(CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
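
/*
 * Sketch of how the Detect Quiet min delay quirk can program this field
 * (illustrative read-modify-write; cdns_pcie_detect_quiet_min_delay_set()
 * declared below is the real helper, and the delay value 0x3 here is an
 * assumption for the example):
 *
 *	u32 ltssm_control_cap;
 *
 *	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
 *	ltssm_control_cap &= ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK;
 *	ltssm_control_cap |= CDNS_PCIE_DETECT_QUIET_MIN_DELAY(0x3);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
 */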

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

#define CDNS_PCIE_RP_MAX_IB	0x3
#define CDNS_PCIE_MAX_OB	32

struct cdns_pcie_rp_ib_bar {
	u64 size;
	bool free;
};

/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK	GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA	BIT(16)

struct cdns_pcie;

enum cdns_pcie_msg_code {
	MSG_CODE_ASSERT_INTA	= 0x20,
	MSG_CODE_ASSERT_INTB	= 0x21,
	MSG_CODE_ASSERT_INTC	= 0x22,
	MSG_CODE_ASSERT_INTD	= 0x23,
	MSG_CODE_DEASSERT_INTA	= 0x24,
	MSG_CODE_DEASSERT_INTB	= 0x25,
	MSG_CODE_DEASSERT_INTC	= 0x26,
	MSG_CODE_DEASSERT_INTD	= 0x27,
};

enum cdns_pcie_msg_routing {
	/* Route to Root Complex */
	MSG_ROUTING_TO_RC,

	/* Use Address Routing */
	MSG_ROUTING_BY_ADDR,

	/* Use ID Routing */
	MSG_ROUTING_BY_ID,

	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_BCAST,

	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_LOCAL,

	/* Gather & route to Root Complex (PME_TO_Ack message) */
	MSG_ROUTING_GATHER,
};
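
/*
 * Usage sketch (illustrative only): a legacy INTx assertion can be sent by
 * writing into an outbound region that has been configured for normal
 * messages, at an offset that encodes the routing and message code.
 * ep->irq_cpu_addr refers to the struct cdns_pcie_ep member defined below.
 *
 *	u32 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
 *		     CDNS_PCIE_NORMAL_MSG_CODE(MSG_CODE_ASSERT_INTA) |
 *		     CDNS_PCIE_MSG_NO_DATA;
 *
 *	writel(0, ep->irq_cpu_addr + offset);
 */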

struct cdns_pcie_ops {
	int	(*start_link)(struct cdns_pcie *pcie);
	void	(*stop_link)(struct cdns_pcie *pcie);
	bool	(*link_up)(struct cdns_pcie *pcie);
	u64	(*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};
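
/*
 * A platform wrapper driver fills in these hooks; minimal sketch with
 * hypothetical my_plat_* callbacks (not part of this driver):
 *
 *	static const struct cdns_pcie_ops my_plat_pcie_ops = {
 *		.start_link	= my_plat_pcie_start_link,
 *		.stop_link	= my_plat_pcie_stop_link,
 *		.link_up	= my_plat_pcie_link_up,
 *	};
 *
 * cpu_addr_fixup() is optional and lets the wrapper translate CPU addresses
 * before they are programmed into the outbound regions.
 */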

/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller device
 * @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *       wrapper
 */
struct cdns_pcie {
	void __iomem		*reg_base;
	struct resource		*mem_res;
	struct device		*dev;
	bool			is_rc;
	int			phy_count;
	struct phy		**phy;
	struct device_link	**link;
	const struct cdns_pcie_ops *ops;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: tracks whether RP_BAR0, RP_BAR1 and RP_NO_BAR are still
 *                free for inbound address translation
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct resource		*cfg_res;
	void __iomem		*cfg_base;
	u32			vendor_id;
	u32			device_id;
	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	unsigned int		quirk_retrain_flag:1;
	unsigned int		quirk_detect_quiet_flag:1;
};

/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf: Info about virtual functions attached to the physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct cdns_pcie_epf *epf;
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (legacy
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/legacy IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted legacy IRQs.
 * @lock: spinlock taken with interrupts disabled while doing read-modify-write
 *	  of PCIe controller register fields that are accessible by both the
 *	  remote RC and the EP, to minimize the time between read and write
 * @epf: Structure to hold info about endpoint function
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
	/* protect writing to PCI_STATUS while raising legacy interrupts */
	spinlock_t		lock;
	struct cdns_pcie_epf	*epf;
	unsigned int		quirk_detect_quiet_flag:1;
	unsigned int		quirk_disable_flr:1;
};

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 val = readl(aligned_addr);

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return 0;
	}

	if (size > 2)
		return val;

	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 mask;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		writel(value, addr);
		return;
	}

	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
	val = readl(aligned_addr) & mask;
	val |= value << (offset * 8);
	writel(val, aligned_addr);
}
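
/*
 * cdns_pcie_read_sz()/cdns_pcie_write_sz() implement sub-word (8/16-bit)
 * accesses on top of 32-bit MMIO: the containing aligned word is read, the
 * targeted byte lane(s) are masked, and the word is written back.
 *
 * Illustrative use (PCI_SECONDARY_BUS is chosen only as an example offset
 * and "busnr" is an assumed variable; the cdns_pcie_rp_writeb() wrapper
 * below is the normal way to do this):
 *
 *	cdns_pcie_write_sz(pcie->reg_base + CDNS_PCIE_RP_BASE +
 *			   PCI_SECONDARY_BUS, 0x1, busnr);
 */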

/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->start_link)
		return pcie->ops->start_link(pcie);

	return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->stop_link)
		pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
	if (pcie->ops->link_up)
		return pcie->ops->link_up(pcie);

	return true;
}
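
/*
 * Polling sketch (illustrative only): callers that need to wait for the link
 * typically combine cdns_pcie_link_up() with the LINK_WAIT_* parameters
 * defined at the top of this file.
 *
 *	int retries;
 *
 *	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 *		if (cdns_pcie_link_up(pcie)) {
 *			dev_info(pcie->dev, "Link up\n");
 *			return 0;
 *		}
 *		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 *	}
 *
 *	return -ETIMEDOUT;
 */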

#ifdef CONFIG_PCIE_CADENCE_HOST
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
#else
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
					     int where)
{
	return NULL;
}
#endif

#ifdef CONFIG_PCIE_CADENCE_EP
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}
#endif

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */