pcie-microchip-host.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip AXI PCIe Bridge host controller driver
 *
 * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
 *
 * Author: Daire McNamara <[email protected]>
 */

#include <linux/clk.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

#include "../pci.h"

/* Number of MSI IRQs */
#define MC_NUM_MSI_IRQS 32
#define MC_NUM_MSI_IRQS_CODED 5

/* PCIe Bridge Phy and Controller Phy offsets */
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
#define MC_PCIE1_CTRL_ADDR 0x0000a000u

#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)

/* PCIe Controller Phy Regs */
#define SEC_ERROR_CNT 0x20
#define DED_ERROR_CNT 0x24
#define SEC_ERROR_INT 0x28
#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
#define NUM_SEC_ERROR_INTS (4)
#define SEC_ERROR_INT_MASK 0x2c
#define DED_ERROR_INT 0x30
#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
#define NUM_DED_ERROR_INTS (4)
#define DED_ERROR_INT_MASK 0x34
#define ECC_CONTROL 0x38
#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
#define LTSSM_STATE 0x5c
#define LTSSM_L0_STATE 0x10
#define PCIE_EVENT_INT 0x14c
#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
#define PCIE_EVENT_INT_ENB_SHIFT 16
#define NUM_PCIE_EVENTS (3)

/* PCIe Bridge Phy Regs */
#define PCIE_PCI_IDS_DW1 0x9c

/* PCIe Config space MSI capability structure */
#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
#define MC_MSI_MAX_Q_AVAIL (MC_NUM_MSI_IRQS_CODED << 1)
#define MC_MSI_Q_SIZE (MC_NUM_MSI_IRQS_CODED << 4)

#define IMASK_LOCAL 0x180
#define DMA_END_ENGINE_0_MASK 0x00000000u
#define DMA_END_ENGINE_0_SHIFT 0
#define DMA_END_ENGINE_1_MASK 0x00000000u
#define DMA_END_ENGINE_1_SHIFT 1
#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
#define DMA_ERROR_ENGINE_0_SHIFT 8
#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
#define DMA_ERROR_ENGINE_1_SHIFT 9
#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
#define A_ATR_EVT_POST_ERR_SHIFT 16
#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
#define A_ATR_EVT_FETCH_ERR_SHIFT 17
#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
#define A_ATR_EVT_DOORBELL_SHIFT 19
#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
#define P_ATR_EVT_POST_ERR_SHIFT 20
#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
#define P_ATR_EVT_FETCH_ERR_SHIFT 21
#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
#define P_ATR_EVT_DOORBELL_SHIFT 23
#define PM_MSI_INT_INTA_MASK 0x01000000u
#define PM_MSI_INT_INTA_SHIFT 24
#define PM_MSI_INT_INTB_MASK 0x02000000u
#define PM_MSI_INT_INTB_SHIFT 25
#define PM_MSI_INT_INTC_MASK 0x04000000u
#define PM_MSI_INT_INTC_SHIFT 26
#define PM_MSI_INT_INTD_MASK 0x08000000u
#define PM_MSI_INT_INTD_SHIFT 27
#define PM_MSI_INT_INTX_MASK 0x0f000000u
#define PM_MSI_INT_INTX_SHIFT 24
#define PM_MSI_INT_MSI_MASK 0x10000000u
#define PM_MSI_INT_MSI_SHIFT 28
#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
#define PM_MSI_INT_AER_EVT_SHIFT 29
#define PM_MSI_INT_EVENTS_MASK 0x40000000u
#define PM_MSI_INT_EVENTS_SHIFT 30
#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
#define PM_MSI_INT_SYS_ERR_SHIFT 31
#define NUM_LOCAL_EVENTS 15
#define ISTATUS_LOCAL 0x184
#define IMASK_HOST 0x188
#define ISTATUS_HOST 0x18c
#define MSI_ADDR 0x190
#define ISTATUS_MSI 0x194

/* PCIe Master table init defines */
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
#define ATR0_PCIE_ATR_SIZE 0x25
#define ATR0_PCIE_ATR_SIZE_SHIFT 1
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u

/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
#define ATR_SIZE_SHIFT 1
#define ATR_IMPL_ENABLE 1
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u

#define ATR_ENTRY_SIZE 32

#define EVENT_PCIE_L2_EXIT 0
#define EVENT_PCIE_HOTRST_EXIT 1
#define EVENT_PCIE_DLUP_EXIT 2
#define EVENT_SEC_TX_RAM_SEC_ERR 3
#define EVENT_SEC_RX_RAM_SEC_ERR 4
#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
#define EVENT_DED_TX_RAM_DED_ERR 7
#define EVENT_DED_RX_RAM_DED_ERR 8
#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
#define EVENT_LOCAL_DMA_END_ENGINE_0 11
#define EVENT_LOCAL_DMA_END_ENGINE_1 12
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
#define EVENT_LOCAL_PM_MSI_INT_INTX 23
#define EVENT_LOCAL_PM_MSI_INT_MSI 24
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
#define NUM_EVENTS 28

#define PCIE_EVENT_CAUSE(x, s) \
	[EVENT_PCIE_ ## x] = { __stringify(x), s }

#define SEC_ERROR_CAUSE(x, s) \
	[EVENT_SEC_ ## x] = { __stringify(x), s }

#define DED_ERROR_CAUSE(x, s) \
	[EVENT_DED_ ## x] = { __stringify(x), s }

#define LOCAL_EVENT_CAUSE(x, s) \
	[EVENT_LOCAL_ ## x] = { __stringify(x), s }

#define PCIE_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = PCIE_EVENT_INT, \
	.mask_offset = PCIE_EVENT_INT, \
	.mask_high = 1, \
	.mask = PCIE_EVENT_INT_ ## x ## _INT, \
	.enb_mask = PCIE_EVENT_INT_ENB_MASK

#define SEC_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = SEC_ERROR_INT, \
	.mask_offset = SEC_ERROR_INT_MASK, \
	.mask = SEC_ERROR_INT_ ## x ## _INT, \
	.mask_high = 1, \
	.enb_mask = 0

#define DED_EVENT(x) \
	.base = MC_PCIE_CTRL_ADDR, \
	.offset = DED_ERROR_INT, \
	.mask_offset = DED_ERROR_INT_MASK, \
	.mask_high = 1, \
	.mask = DED_ERROR_INT_ ## x ## _INT, \
	.enb_mask = 0

#define LOCAL_EVENT(x) \
	.base = MC_PCIE_BRIDGE_ADDR, \
	.offset = ISTATUS_LOCAL, \
	.mask_offset = IMASK_LOCAL, \
	.mask_high = 0, \
	.mask = x ## _MASK, \
	.enb_mask = 0

#define PCIE_EVENT_TO_EVENT_MAP(x) \
	{ PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }

#define SEC_ERROR_TO_EVENT_MAP(x) \
	{ SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }

#define DED_ERROR_TO_EVENT_MAP(x) \
	{ DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }

#define LOCAL_STATUS_TO_EVENT_MAP(x) \
	{ x ## _MASK, EVENT_LOCAL_ ## x }

struct event_map {
	u32 reg_mask;
	u32 event_bit;
};

struct mc_msi {
	struct mutex lock;		/* Protect used bitmap */
	struct irq_domain *msi_domain;
	struct irq_domain *dev_domain;
	u32 num_vectors;
	u64 vector_phy;
	DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
};

struct mc_pcie {
	void __iomem *axi_base_addr;
	struct device *dev;
	struct irq_domain *intx_domain;
	struct irq_domain *event_domain;
	raw_spinlock_t lock;
	struct mc_msi msi;
};

struct cause {
	const char *sym;
	const char *str;
};

static const struct cause event_cause[NUM_EVENTS] = {
	PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
	PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
	PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
	SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
	SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
	SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
	SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
	DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
	DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
	DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
	DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
	LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
	LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
	LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
	LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
};

static struct event_map pcie_event_to_event[] = {
	PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
	PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
};

static struct event_map sec_error_to_event[] = {
	SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
	SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
};

static struct event_map ded_error_to_event[] = {
	DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
	DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
};

static struct event_map local_status_to_event[] = {
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
	LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
	LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
	LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
};
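
/*
 * Per-event register description used by the event irq_chip: which register
 * block the event lives in, its status and mask offsets, and the bit(s)
 * involved. mask_high is set for events whose mask register masks the event
 * when a bit is written as 1; local events instead use the IMASK_LOCAL
 * enable register, where the bit must be cleared to mask the event.
 */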
static struct {
	u32 base;
	u32 offset;
	u32 mask;
	u32 shift;
	u32 enb_mask;
	u32 mask_high;
	u32 mask_offset;
} event_descs[] = {
	{ PCIE_EVENT(L2_EXIT) },
	{ PCIE_EVENT(HOTRST_EXIT) },
	{ PCIE_EVENT(DLUP_EXIT) },
	{ SEC_EVENT(TX_RAM_SEC_ERR) },
	{ SEC_EVENT(RX_RAM_SEC_ERR) },
	{ SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
	{ SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
	{ DED_EVENT(TX_RAM_DED_ERR) },
	{ DED_EVENT(RX_RAM_DED_ERR) },
	{ DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
	{ DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
	{ LOCAL_EVENT(DMA_END_ENGINE_0) },
	{ LOCAL_EVENT(DMA_END_ENGINE_1) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
	{ LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
	{ LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
	{ LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
	{ LOCAL_EVENT(PM_MSI_INT_INTX) },
	{ LOCAL_EVENT(PM_MSI_INT_MSI) },
	{ LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
	{ LOCAL_EVENT(PM_MSI_INT_EVENTS) },
	{ LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
};

static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
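
/*
 * Enable MSI in the root port's config-space MSI capability and program the
 * message address registers with the MSI target address used by this driver
 * (msi->vector_phy).
 */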
static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
{
	struct mc_msi *msi = &port->msi;
	u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
	u16 msg_ctrl = readw_relaxed(base + cap_offset + PCI_MSI_FLAGS);

	msg_ctrl |= PCI_MSI_FLAGS_ENABLE;
	msg_ctrl &= ~PCI_MSI_FLAGS_QMASK;
	msg_ctrl |= MC_MSI_MAX_Q_AVAIL;
	msg_ctrl &= ~PCI_MSI_FLAGS_QSIZE;
	msg_ctrl |= MC_MSI_Q_SIZE;
	msg_ctrl |= PCI_MSI_FLAGS_64BIT;

	writew_relaxed(msg_ctrl, base + cap_offset + PCI_MSI_FLAGS);

	writel_relaxed(lower_32_bits(msi->vector_phy),
		       base + cap_offset + PCI_MSI_ADDRESS_LO);
	writel_relaxed(upper_32_bits(msi->vector_phy),
		       base + cap_offset + PCI_MSI_ADDRESS_HI);
}
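
/* Chained handler: demultiplex ISTATUS_MSI into the per-vector MSI domain IRQs */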
static void mc_handle_msi(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	struct mc_msi *msi = &port->msi;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_MSI_MASK) {
		writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
		status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
		for_each_set_bit(bit, &status, msi->num_vectors) {
			ret = generic_handle_domain_irq(msi->dev_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void mc_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 bitpos = data->hwirq;

	writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
}

static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = port->msi.vector_phy;

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int mc_msi_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip mc_msi_bottom_irq_chip = {
	.name = "Microchip MSI",
	.irq_ack = mc_msi_bottom_irq_ack,
	.irq_compose_msi_msg = mc_compose_msi_msg,
	.irq_set_affinity = mc_msi_set_affinity,
};

static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct mc_pcie *port = domain->host_data;
	struct mc_msi *msi = &port->msi;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long bit;
	u32 val;

	mutex_lock(&msi->lock);
	bit = find_first_zero_bit(msi->used, msi->num_vectors);
	if (bit >= msi->num_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->used);

	irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
			    domain->host_data, handle_edge_irq, NULL, NULL);

	/* Enable MSI interrupts */
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= PM_MSI_INT_MSI_MASK;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);

	mutex_unlock(&msi->lock);

	return 0;
}

static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mc_pcie *port = irq_data_get_irq_chip_data(d);
	struct mc_msi *msi = &port->msi;

	mutex_lock(&msi->lock);

	if (test_bit(d->hwirq, msi->used))
		__clear_bit(d->hwirq, msi->used);
	else
		dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = mc_irq_msi_domain_alloc,
	.free = mc_irq_msi_domain_free,
};

static struct irq_chip mc_msi_irq_chip = {
	.name = "Microchip PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mc_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &mc_msi_irq_chip,
};
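
/*
 * Create the two-level MSI setup: an inner domain for the hardware vectors
 * and a PCI MSI domain layered on top of it.
 */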
static int mc_allocate_msi_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mc_msi *msi = &port->msi;

	mutex_init(&port->msi.lock);

	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
						&msi_domain_ops, port);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}
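
/* Chained handler: demultiplex the INTA-INTD bits of ISTATUS_LOCAL into the INTx domain */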
static void mc_handle_intx(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = port->dev;
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long status;
	u32 bit;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
	if (status & PM_MSI_INT_INTX_MASK) {
		status &= PM_MSI_INT_INTX_MASK;
		status >>= PM_MSI_INT_INTX_SHIFT;
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			ret = generic_handle_domain_irq(port->intx_domain, bit);
			if (ret)
				dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
						    bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static void mc_ack_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);

	writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
}

static void mc_mask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void mc_unmask_intx_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	unsigned long flags;
	u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip mc_intx_irq_chip = {
	.name = "Microchip PCIe INTx",
	.irq_ack = mc_ack_intx_irq,
	.irq_mask = mc_mask_intx_irq,
	.irq_unmask = mc_unmask_intx_irq,
};

static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mc_pcie_intx_map,
};

static inline u32 reg_to_event(u32 reg, struct event_map field)
{
	return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
}

static u32 pcie_events(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
		val |= reg_to_event(reg, pcie_event_to_event[i]);

	return val;
}

static u32 sec_errors(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
		val |= reg_to_event(reg, sec_error_to_event[i]);

	return val;
}

static u32 ded_errors(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
		val |= reg_to_event(reg, ded_error_to_event[i]);

	return val;
}

static u32 local_events(void __iomem *addr)
{
	u32 reg = readl_relaxed(addr);
	u32 val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
		val |= reg_to_event(reg, local_status_to_event[i]);

	return val;
}
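
/*
 * Gather all pending sources (PCIe events, SEC/DED errors and local status
 * bits) into a single bitmap indexed by the EVENT_* numbers.
 */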
static u32 get_events(struct mc_pcie *port)
{
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
	u32 events = 0;

	events |= pcie_events(ctrl_base_addr + PCIE_EVENT_INT);
	events |= sec_errors(ctrl_base_addr + SEC_ERROR_INT);
	events |= ded_errors(ctrl_base_addr + DED_ERROR_INT);
	events |= local_events(bridge_base_addr + ISTATUS_LOCAL);

	return events;
}

static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
	struct mc_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *data;

	data = irq_domain_get_irq_data(port->event_domain, irq);

	if (event_cause[data->hwirq].str)
		dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
	else
		dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);

	return IRQ_HANDLED;
}
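
/* Chained handler for the aggregated event interrupt: fan out to the event domain */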
static void mc_handle_event(struct irq_desc *desc)
{
	struct mc_pcie *port = irq_desc_get_handler_data(desc);
	unsigned long events;
	u32 bit;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	events = get_events(port);

	for_each_set_bit(bit, &events, NUM_EVENTS)
		generic_handle_domain_irq(port->event_domain, bit);

	chained_irq_exit(chip, desc);
}

static void mc_ack_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].offset;
	mask = event_descs[event].mask;
	mask |= event_descs[event].enb_mask;

	writel_relaxed(mask, addr);
}

static void mc_mask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;
	if (event_descs[event].enb_mask) {
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;
		mask &= PCIE_EVENT_INT_ENB_MASK;
	}

	if (!event_descs[event].mask_high)
		mask = ~mask;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val |= mask;
	else
		val &= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static void mc_unmask_event_irq(struct irq_data *data)
{
	struct mc_pcie *port = irq_data_get_irq_chip_data(data);
	u32 event = data->hwirq;
	void __iomem *addr;
	u32 mask;
	u32 val;

	addr = port->axi_base_addr + event_descs[event].base +
		event_descs[event].mask_offset;
	mask = event_descs[event].mask;

	if (event_descs[event].enb_mask)
		mask <<= PCIE_EVENT_INT_ENB_SHIFT;

	if (event_descs[event].mask_high)
		mask = ~mask;

	if (event_descs[event].enb_mask)
		mask &= PCIE_EVENT_INT_ENB_MASK;

	raw_spin_lock(&port->lock);
	val = readl_relaxed(addr);
	if (event_descs[event].mask_high)
		val &= mask;
	else
		val |= mask;

	writel_relaxed(val, addr);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip mc_event_irq_chip = {
	.name = "Microchip PCIe EVENT",
	.irq_ack = mc_ack_event_irq,
	.irq_mask = mc_mask_event_irq,
	.irq_unmask = mc_unmask_event_irq,
};

static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = mc_pcie_event_map,
};
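
/*
 * Get an optional named clock, enable it, and register a devm action so it
 * is disabled and unprepared again when the device is released.
 */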
static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))
		return clk;
	if (!clk)
		return clk;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ERR_PTR(ret);

	devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare,
				 clk);

	return clk;
}

static int mc_pcie_init_clks(struct device *dev)
{
	int i;
	struct clk *fic;

	/*
	 * PCIe may be clocked via Fabric Interface using between 1 and 4
	 * clocks. Scan DT for clocks and enable them if present
	 */
	for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
		fic = mc_pcie_init_clk(dev, poss_clks[i]);
		if (IS_ERR(fic))
			return PTR_ERR(fic);
	}

	return 0;
}
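
/*
 * Create the event and INTx IRQ domains from the interrupt-controller child
 * node, then the MSI domains.
 */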
static int mc_pcie_init_irq_domains(struct mc_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
						   &event_domain_ops, port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return mc_allocate_msi_domains(port);
}
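
/*
 * Program one address translation table entry mapping an AXI (CPU) window
 * onto PCIe address space; index 0 is used for config space accesses, all
 * other indexes target the TX/RX interface.
 */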
static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
				 phys_addr_t axi_addr, phys_addr_t pci_addr,
				 size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
	      ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}

static int mc_pcie_setup_windows(struct platform_device *pdev,
				 struct mc_pcie *port)
{
	void __iomem *bridge_base_addr =
		port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			mc_pcie_setup_window(bridge_base_addr, index,
					     entry->res->start, pci_addr,
					     resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
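
/*
 * ECAM init hook: map the bridge/ctrl registers, set up clocks, IRQ domains
 * and chained handlers, enable MSI, and program the address translation
 * windows.
 */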
static int mc_platform_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *pdev = to_platform_device(dev);
	struct mc_pcie *port;
	void __iomem *bridge_base_addr;
	void __iomem *ctrl_base_addr;
	int ret;
	int irq;
	int i, intx_irq, msi_irq, event_irq;
	u32 val;
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	port->dev = dev;

	ret = mc_pcie_init_clks(dev);
	if (ret) {
		dev_err(dev, "failed to get clock resources, error %d\n", ret);
		return -ENODEV;
	}

	port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(port->axi_base_addr))
		return PTR_ERR(port->axi_base_addr);

	bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
	ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;

	port->msi.vector_phy = MSI_ADDR;
	port->msi.num_vectors = MC_NUM_MSI_IRQS;
	ret = mc_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	for (i = 0; i < NUM_EVENTS; i++) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		err = devm_request_irq(dev, event_irq, mc_event_handler,
				       0, event_cause[i].sym, port);
		if (err) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return err;
		}
	}

	intx_irq = irq_create_mapping(port->event_domain,
				      EVENT_LOCAL_PM_MSI_INT_INTX);
	if (!intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);

	msi_irq = irq_create_mapping(port->event_domain,
				     EVENT_LOCAL_PM_MSI_INT_MSI);
	if (!msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(irq, mc_handle_event, port);

	/* Hardware doesn't setup MSI by default */
	mc_pcie_enable_msi(port, cfg->win);

	val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
	val |= PM_MSI_INT_INTX_MASK;
	writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);

	writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);

	val = PCIE_EVENT_INT_L2_EXIT_INT |
	      PCIE_EVENT_INT_HOTRST_EXIT_INT |
	      PCIE_EVENT_INT_DLUP_EXIT_INT;
	writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);

	val = SEC_ERROR_INT_TX_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_RX_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT |
	      SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT;
	writel_relaxed(val, ctrl_base_addr + SEC_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_INT_MASK);
	writel_relaxed(0, ctrl_base_addr + SEC_ERROR_CNT);

	val = DED_ERROR_INT_TX_RAM_DED_ERR_INT |
	      DED_ERROR_INT_RX_RAM_DED_ERR_INT |
	      DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT |
	      DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT;
	writel_relaxed(val, ctrl_base_addr + DED_ERROR_INT);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_INT_MASK);
	writel_relaxed(0, ctrl_base_addr + DED_ERROR_CNT);

	writel_relaxed(0, bridge_base_addr + IMASK_HOST);
	writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);

	/* Configure Address Translation Table 0 for PCIe config space */
	mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start & 0xffffffff,
			     cfg->res.start, resource_size(&cfg->res));

	return mc_pcie_setup_windows(pdev, port);
}

static const struct pci_ecam_ops mc_ecam_ops = {
	.init = mc_platform_init,
	.pci_ops = {
		.map_bus = pci_ecam_map_bus,
		.read = pci_generic_config_read,
		.write = pci_generic_config_write,
	}
};

static const struct of_device_id mc_pcie_of_match[] = {
	{
		.compatible = "microchip,pcie-host-1.0",
		.data = &mc_ecam_ops,
	},
	{},
};

MODULE_DEVICE_TABLE(of, mc_pcie_of_match);

static struct platform_driver mc_pcie_driver = {
	.probe = pci_host_common_probe,
	.driver = {
		.name = "microchip-pcie",
		.of_match_table = mc_pcie_of_match,
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(mc_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");
MODULE_AUTHOR("Daire McNamara <[email protected]>");