// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_SSDEV_ID_REG 0x2c
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
#define PIO_STAT (PIO_BASE_ADDR + 0x4)
#define PIO_COMPLETION_STATUS_SHIFT 7
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
#define PIO_COMPLETION_STATUS_CRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
#define PIO_START (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM (PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR 0x4800
#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
#define PCIE_GEN_SEL_MSK 0x3
#define PCIE_GEN_SEL_SHIFT 0x0
#define SPEED_GEN_1 0
#define SPEED_GEN_2 1
#define SPEED_GEN_3 2
#define IS_RC_MSK 1
#define IS_RC_SHIFT 2
#define LANE_CNT_MSK 0x18
#define LANE_CNT_SHIFT 0x3
#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
#define LINK_TRAINING_EN BIT(6)
#define LEGACY_INTA BIT(28)
#define LEGACY_INTB BIT(29)
#define LEGACY_INTC BIT(30)
#define LEGACY_INTD BIT(31)
#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
#define HOT_RESET_GEN BIT(0)
#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
#define PCIE_CORE_CTRL2_RESERVED 0x7
#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_CORR_ERR BIT(11)
#define PCIE_ISR0_NFAT_ERR BIT(12)
#define PCIE_ISR0_FAT_ERR BIT(13)
#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)

/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_COUNT 8
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
                                      OB_WIN_BLOCK_SIZE * (win) + \
                                      (offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_ENABLE BIT(0)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
#define OB_WIN_FUNC_NUM_SHIFT 24
#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
#define OB_WIN_BUS_NUM_BITS_SHIFT 20
#define OB_WIN_MSG_CODE_ENABLE BIT(22)
#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
#define OB_WIN_MSG_CODE_SHIFT 14
#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
#define OB_WIN_ATTR_ENABLE BIT(11)
#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
#define OB_WIN_ATTR_TC_SHIFT 8
#define OB_WIN_ATTR_RELAXED BIT(7)
#define OB_WIN_ATTR_NOSNOOP BIT(6)
#define OB_WIN_ATTR_POISON BIT(5)
#define OB_WIN_ATTR_IDO BIT(4)
#define OB_WIN_TYPE_MASK GENMASK(3, 0)
#define OB_WIN_TYPE_SHIFT 0
#define OB_WIN_TYPE_MEM 0x0
#define OB_WIN_TYPE_IO 0x4
#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
#define OB_WIN_TYPE_MSG 0xc

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
#define RC_BAR_CONFIG 0x300

/* LTSSM values in CFG_REG */
enum {
        LTSSM_DETECT_QUIET = 0x0,
        LTSSM_DETECT_ACTIVE = 0x1,
        LTSSM_POLLING_ACTIVE = 0x2,
        LTSSM_POLLING_COMPLIANCE = 0x3,
        LTSSM_POLLING_CONFIGURATION = 0x4,
        LTSSM_CONFIG_LINKWIDTH_START = 0x5,
        LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
        LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
        LTSSM_CONFIG_LANENUM_WAIT = 0x8,
        LTSSM_CONFIG_COMPLETE = 0x9,
        LTSSM_CONFIG_IDLE = 0xa,
        LTSSM_RECOVERY_RCVR_LOCK = 0xb,
        LTSSM_RECOVERY_SPEED = 0xc,
        LTSSM_RECOVERY_RCVR_CFG = 0xd,
        LTSSM_RECOVERY_IDLE = 0xe,
        LTSSM_L0 = 0x10,
        LTSSM_RX_L0S_ENTRY = 0x11,
        LTSSM_RX_L0S_IDLE = 0x12,
        LTSSM_RX_L0S_FTS = 0x13,
        LTSSM_TX_L0S_ENTRY = 0x14,
        LTSSM_TX_L0S_IDLE = 0x15,
        LTSSM_TX_L0S_FTS = 0x16,
        LTSSM_L1_ENTRY = 0x17,
        LTSSM_L1_IDLE = 0x18,
        LTSSM_L2_IDLE = 0x19,
        LTSSM_L2_TRANSMIT_WAKE = 0x1a,
        LTSSM_DISABLED = 0x20,
        LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
        LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
        LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
        LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
        LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
        LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
        LTSSM_HOT_RESET = 0x27,
        LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
        LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
        LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
        LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
};

#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
#define CTRL_MODE_SHIFT 0x0
#define CTRL_MODE_MASK 0x1
#define PCIE_CORE_MODE_DIRECT 0x0
#define PCIE_CORE_MODE_COMMAND 0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR 0x1b000
#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
#define PCIE_IRQ_CMDQ_INT BIT(0)
#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
#define PCIE_IRQ_DMA_INT BIT(4)
#define PCIE_IRQ_IB_DXFERDONE BIT(5)
#define PCIE_IRQ_OB_DXFERDONE BIT(6)
#define PCIE_IRQ_OB_RXFERDONE BIT(7)
#define PCIE_IRQ_COMPQ_INT BIT(12)
#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
#define PCIE_IRQ_CORE_INT BIT(16)
#define PCIE_IRQ_CORE_INT_PIO BIT(17)
#define PCIE_IRQ_DPMU_INT BIT(18)
#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
#define PCIE_IRQ_MSI_INT1_DET BIT(20)
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT

/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0 0x8
#define PCIE_CONFIG_RD_TYPE1 0x9
#define PCIE_CONFIG_WR_TYPE0 0xa
#define PCIE_CONFIG_WR_TYPE1 0xb

#define PIO_RETRY_CNT 750000 /* 1.5 s */
#define PIO_RETRY_DELAY 2 /* 2 us */

#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

#define RETRAIN_WAIT_MAX_RETRIES 10
#define RETRAIN_WAIT_USLEEP_US 2000

#define MSI_IRQ_NUM 32
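
/*
 * Fabricated config read value returned for CRS: Vendor ID 0x0001 in the
 * low 16 bits and all ones above, as PCIe r4.0, sec 2.3.2 requires when
 * CRS Software Visibility is enabled (see advk_pcie_check_pio_status()).
 */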
#define CFG_RD_CRS_VAL 0xffff0001

struct advk_pcie {
        struct platform_device *pdev;
        void __iomem *base;
        struct {
                phys_addr_t match;
                phys_addr_t remap;
                phys_addr_t mask;
                u32 actions;
        } wins[OB_WIN_COUNT];
        u8 wins_count;
        struct irq_domain *rp_irq_domain;
        struct irq_domain *irq_domain;
        struct irq_chip irq_chip;
        raw_spinlock_t irq_lock;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_inner_domain;
        raw_spinlock_t msi_irq_lock;
        DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
        struct mutex msi_used_lock;
        int link_gen;
        struct pci_bridge_emul bridge;
        struct gpio_desc *reset_gpio;
        struct phy *phy;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
        writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
        return readl(pcie->base + reg);
}

static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
        u32 val;
        u8 ltssm_state;

        val = advk_readl(pcie, CFG_REG);
        ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
        return ltssm_state;
}

static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
        /* check if LTSSM is in normal operation - some L* state */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14 (Link
         * Status Mapped to the LTSSM) and section 4.2.6.3.6
         * (Configuration.Idle), Link Up is reported in the LTSSM
         * Configuration.Idle, Recovery, L0, L0s, L1 and L2 states.
         * And according to section 3.2.1 (Data Link Control and
         * Management State Machine Rules), DL Up status is reported
         * in the DL Active state.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
        /*
         * According to PCIe Base specification 3.0, Table 4-14 (Link
         * Status Mapped to the LTSSM), Link Training is reported in
         * the LTSSM Configuration and Recovery states.
         */
        u8 ltssm_state = advk_pcie_ltssm_state(pcie);
        return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
                 ltssm_state < LTSSM_L0) ||
                (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
                 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
        int retries;

        /* check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (advk_pcie_link_up(pcie))
                        return 0;

                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        return -ETIMEDOUT;
}
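
/*
 * Poll until the LTSSM enters a link training state after a retrain was
 * requested via PCI_EXP_LNKCTL_RL, bounded by RETRAIN_WAIT_MAX_RETRIES
 * iterations of RETRAIN_WAIT_USLEEP_US (20 ms in total).
 */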
static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
        size_t retries;

        for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
                if (advk_pcie_link_training(pcie))
                        break;
                udelay(RETRAIN_WAIT_USLEEP_US);
        }
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
        if (!pcie->reset_gpio)
                return;

        /* 10ms delay is needed for some cards */
        dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
        gpiod_set_value_cansleep(pcie->reset_gpio, 1);
        usleep_range(10000, 11000);
        gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        int ret;

        /*
         * Setup PCIe rev / gen compliance based on device tree property
         * 'max-link-speed' which also forces maximal link speed.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~PCIE_GEN_SEL_MSK;
        if (pcie->link_gen == 3)
                reg |= SPEED_GEN_3;
        else if (pcie->link_gen == 2)
                reg |= SPEED_GEN_2;
        else
                reg |= SPEED_GEN_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Set maximal link speed value also into PCIe Link Control 2 register.
         * Armada 3700 Functional Specification says that default value is based
         * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
         */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
        reg &= ~PCI_EXP_LNKCTL2_TLS;
        if (pcie->link_gen == 3)
                reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
        else if (pcie->link_gen == 2)
                reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
        else
                reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

        /* Enable link training after selecting PCIe generation */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= LINK_TRAINING_EN;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Reset PCIe card via PERST# signal. Some cards are not detected
         * during link training when they are in some non-initial state.
         */
        advk_pcie_issue_perst(pcie);

        /*
         * PERST# signal could have been asserted by pinctrl subsystem before
         * probe() callback has been called or issued explicitly by reset gpio
         * function advk_pcie_issue_perst(), making the endpoint go into
         * fundamental reset. As required by PCI Express spec (PCI Express
         * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
         * Conventional Reset) a delay of at least 100ms after such a reset
         * is needed before sending a Configuration Request to the device.
         * So wait until the PCIe link is up. Function advk_pcie_wait_for_link()
         * waits for the link for at least 900ms.
         */
        ret = advk_pcie_wait_for_link(pcie);
        if (ret < 0)
                dev_err(dev, "link never came up\n");
        else
                dev_info(dev, "link up\n");
}

/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
                                 phys_addr_t match, phys_addr_t remap,
                                 phys_addr_t mask, u32 actions)
{
        advk_writel(pcie, OB_WIN_ENABLE |
                          lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
        advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
        advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
        advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
        phys_addr_t msi_addr;
        u32 reg;
        int i;

        /*
         * Configure PCIe Reference clock. Direction is from the PCIe
         * controller to the endpoint card, so enable transmitting of
         * Reference clock differential signal off-chip and disable
         * receiving off-chip differential signal.
         */
        reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
        reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
        reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

        /* Set to Direct mode */
        reg = advk_readl(pcie, CTRL_CONFIG_REG);
        reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
        reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
        advk_writel(pcie, reg, CTRL_CONFIG_REG);

        /* Set PCI global control register to RC mode */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg |= (IS_RC_MSK << IS_RC_SHIFT);
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

        /*
         * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
         * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
         * id in high 16 bits. Updating this register changes readback value of
         * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
         * for erratum 4.1: "The value of device and vendor ID is incorrect".
         */
        reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
        advk_writel(pcie, reg, VENDOR_ID_REG);

        /*
         * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
         * because the default value is Mass storage controller (0x010400).
         *
         * Note that this Aardvark PCI Bridge does not have a compliant Type 1
         * Configuration Space and it cannot even be accessed via Aardvark's
         * PCI config space access method. Something like config space is
         * available in internal Aardvark registers starting at offset 0x0
         * and is reported as Type 0. In the range 0x10 - 0x34 it has totally
         * different registers.
         *
         * Therefore the driver uses an emulated PCI Bridge which provides
         * access to the configuration space via internal Aardvark registers
         * or an emulated configuration buffer.
         */
        reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
        reg &= ~0xffffff00;
        reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
        advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

        /* Disable Root Bridge I/O space, memory space and bus mastering */
        reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
        reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

        /* Set Advanced Error Capabilities and Control PF0 register */
        reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
              PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
        advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

        /* Set PCIe Device Control register */
        reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
        reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
        reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
        reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
        reg &= ~PCI_EXP_DEVCTL_READRQ;
        reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
        reg |= PCI_EXP_DEVCTL_READRQ_512B;
        advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

        /* Program PCIe Control 2 to disable strict ordering */
        reg = PCIE_CORE_CTRL2_RESERVED |
              PCIE_CORE_CTRL2_TD_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Set lane X1 */
        reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
        reg &= ~LANE_CNT_MSK;
        reg |= LANE_COUNT_1;
        advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
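
        /*
         * The MSI doorbell address only needs to be a DMA-able address known
         * to the controller: the physical address of this driver's private
         * structure is (re)used for that. Inbound writes matching
         * PCIE_MSI_ADDR_LOW/HIGH are expected to be terminated by the
         * controller as MSI events rather than forwarded to memory.
         */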
        /* Set MSI address */
        msi_addr = virt_to_phys(pcie);
        advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
        advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);

        /* Enable MSI */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /* Clear all interrupts */
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
        advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

        /* Disable All ISR0/1 and MSI Sources */
        advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
        advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
        advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

        /* Unmask summary MSI interrupt */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_ISR0_MSI_INT_PENDING;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Unmask PME interrupt for processing of PME requester */
        reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
        reg &= ~PCIE_MSG_PM_PME_MASK;
        advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

        /* Enable summary interrupt for GIC SPI source */
        reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
        advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

        /*
         * Enable AXI address window location generation:
         * When it is enabled, the default outbound window
         * configurations (Default User Field: 0xD0074CFC)
         * are used for transparent address translation of
         * outbound transactions. Thus, PCIe address windows
         * are not required for transparent memory access
         * when the default outbound window configuration is
         * set for memory access.
         */
        reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
        reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
        advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

        /*
         * Set memory access in Default User Field so it
         * is not required to configure PCIe address for
         * transparent memory access.
         */
        advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

        /*
         * Bypass the address window mapping for PIO:
         * Since PIO access already contains all required
         * info over AXI interface by PIO registers, the
         * address window is not required.
         */
        reg = advk_readl(pcie, PIO_CTRL);
        reg |= PIO_CTRL_ADDR_WIN_DISABLE;
        advk_writel(pcie, reg, PIO_CTRL);

        /*
         * Configure PCIe address windows for non-memory or
         * non-transparent access as by default PCIe uses
         * transparent memory access.
         */
        for (i = 0; i < pcie->wins_count; i++)
                advk_pcie_set_ob_win(pcie, i,
                                     pcie->wins[i].match, pcie->wins[i].remap,
                                     pcie->wins[i].mask, pcie->wins[i].actions);

        /* Disable remaining PCIe outbound windows */
        for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
                advk_pcie_disable_ob_win(pcie, i);

        advk_pcie_train_link(pcie);
}

static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
        struct device *dev = &pcie->pdev->dev;
        u32 reg;
        unsigned int status;
        char *strcomp_status, *str_posted;
        int ret;

        reg = advk_readl(pcie, PIO_STAT);
        status = (reg & PIO_COMPLETION_STATUS_MASK) >>
                 PIO_COMPLETION_STATUS_SHIFT;

        /*
         * According to HW spec, the PIO status check sequence is as below:
         * 1) even if COMPLETION_STATUS (bit9:7) indicates success, the Error
         *    Status (bit11) still needs to be checked; only when that bit
         *    indicates that no error happened is the operation successful.
         * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
         *    means a PIO write error, and for PIO read it is successful with
         *    a read value of 0xFFFFFFFF.
         * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
         *    only means a PIO write error, and for PIO read it is successful
         *    with a read value of 0xFFFF0001.
         * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
         *    an error for both PIO read and PIO write operations.
         * 5) other errors are indicated as 'unknown'.
         */
        switch (status) {
        case PIO_COMPLETION_STATUS_OK:
                if (reg & PIO_ERR_STATUS) {
                        strcomp_status = "COMP_ERR";
                        ret = -EFAULT;
                        break;
                }
                /* Get the read result */
                if (val)
                        *val = advk_readl(pcie, PIO_RD_DATA);
                /* No error */
                strcomp_status = NULL;
                ret = 0;
                break;
        case PIO_COMPLETION_STATUS_UR:
                strcomp_status = "UR";
                ret = -EOPNOTSUPP;
                break;
        case PIO_COMPLETION_STATUS_CRS:
                if (allow_crs && val) {
                        /* PCIe r4.0, sec 2.3.2, says:
                         * If CRS Software Visibility is enabled:
                         * For a Configuration Read Request that includes both
                         * bytes of the Vendor ID field of a device Function's
                         * Configuration Space Header, the Root Complex must
                         * complete the Request to the host by returning a
                         * read-data value of 0001h for the Vendor ID field and
                         * all '1's for any additional bytes included in the
                         * request.
                         *
                         * So CRS in this case is not an error status.
                         */
                        *val = CFG_RD_CRS_VAL;
                        strcomp_status = NULL;
                        ret = 0;
                        break;
                }
                /* PCIe r4.0, sec 2.3.2, says:
                 * If CRS Software Visibility is not enabled, the Root Complex
                 * must re-issue the Configuration Request as a new Request.
                 * If CRS Software Visibility is enabled: For a Configuration
                 * Write Request or for any other Configuration Read Request,
                 * the Root Complex must re-issue the Configuration Request as
                 * a new Request.
                 * A Root Complex implementation may choose to limit the number
                 * of Configuration Request/CRS Completion Status loops before
                 * determining that something is wrong with the target of the
                 * Request and taking appropriate action, e.g., complete the
                 * Request to the host as a failed transaction.
                 *
                 * So return -EAGAIN and caller (pci-aardvark.c driver) will
                 * re-issue request again up to the PIO_RETRY_CNT retries.
                 */
                strcomp_status = "CRS";
                ret = -EAGAIN;
                break;
        case PIO_COMPLETION_STATUS_CA:
                strcomp_status = "CA";
                ret = -ECANCELED;
                break;
        default:
                strcomp_status = "Unknown";
                ret = -EINVAL;
                break;
        }

        if (!strcomp_status)
                return ret;

        if (reg & PIO_NON_POSTED_REQ)
                str_posted = "Non-posted";
        else
                str_posted = "Posted";

        dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
                str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

        return ret;
}
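
/*
 * Poll for completion of a previously started PIO transfer: done when
 * PIO_START reads back as zero and PIO_ISR signals an event. Returns the
 * number of polling iterations spent (so callers can account them against
 * PIO_RETRY_CNT across CRS retries), or -ETIMEDOUT after roughly 1.5 s.
 */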
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        int i;

        for (i = 1; i <= PIO_RETRY_CNT; i++) {
                u32 start, isr;

                start = advk_readl(pcie, PIO_START);
                isr = advk_readl(pcie, PIO_ISR);
                if (!start && isr)
                        return i;
                udelay(PIO_RETRY_DELAY);
        }

        dev_err(dev, "PIO read/write transfer time out\n");
        return -ETIMEDOUT;
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;

        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32bit register we support reading from HW only
                 * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
                 * Other bits are retrieved only from emulated config buffer.
                 */
                __le32 *cfgspace = (__le32 *)&bridge->conf;
                u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
                if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
                        val &= ~(PCI_BRIDGE_CTL_SERR << 16);
                else
                        val |= PCI_BRIDGE_CTL_SERR << 16;
                if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
                        val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
                else
                        val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_COMMAND:
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;

        case PCI_INTERRUPT_LINE:
                /*
                 * According to Figure 6-3 (Pseudo Logic Diagram for Error
                 * Message Controls) in the PCIe base specification, the SERR#
                 * Enable bit in the Bridge Control register enables receiving
                 * of ERR_* messages.
                 */
                if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
                        u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
                        if (new & (PCI_BRIDGE_CTL_SERR << 16))
                                val &= ~PCIE_ISR0_ERR_MASK;
                        else
                                val |= PCIE_ISR0_ERR_MASK;
                        advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
                }

                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
                        if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
                                val |= HOT_RESET_GEN;
                        else
                                val &= ~HOT_RESET_GEN;
                        advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
                }

                break;

        default:
                break;
        }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
                                    int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        /*
         * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
         * also supported, but do not need to be handled here, because their
         * values are stored in emulated config space buffer, and we read them
         * from there when needed.
         */

        case PCI_EXP_LNKCAP: {
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                /*
                 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
                 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
                 * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
                 */
                val |= PCI_EXP_LNKCAP_DLLLARC;
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_LNKCTL: {
                /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
                u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
                          ~(PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_training(pcie))
                        val |= (PCI_EXP_LNKSTA_LT << 16);
                if (advk_pcie_link_active(pcie))
                        val |= (PCI_EXP_LNKSTA_DLLLA << 16);
                *value = val;
                return PCI_BRIDGE_EMUL_HANDLED;
        }

        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
                *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
                                     int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case PCI_EXP_LNKCTL:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                if (new & PCI_EXP_LNKCTL_RL)
                        advk_pcie_wait_for_retrain(pcie);
                break;

        case PCI_EXP_RTCTL: {
                u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
                /* Only emulation of PMEIE and CRSSVE bits is provided */
                rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
                bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
                break;
        }

        /*
         * PCI_EXP_RTSTA is also supported, but does not need to be handled
         * here, because its value is stored in emulated config space buffer,
         * and we write it there when needed.
         */

        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCTL2:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
                break;

        default:
                break;
        }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
                                   int reg, u32 *value)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        case 0:
                *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);

                /*
                 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
                 * 3700 Functional Specification does not document registers
                 * at those addresses.
                 *
                 * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error
                 * Reporting Capability header the last Extended Capability.
                 * If we obtain documentation for those registers in the
                 * future, this can be changed.
                 */
                *value &= 0x000fffff;
                return PCI_BRIDGE_EMUL_HANDLED;

        case PCI_ERR_UNCOR_STATUS:
        case PCI_ERR_UNCOR_MASK:
        case PCI_ERR_UNCOR_SEVER:
        case PCI_ERR_COR_STATUS:
        case PCI_ERR_COR_MASK:
        case PCI_ERR_CAP:
        case PCI_ERR_HEADER_LOG + 0:
        case PCI_ERR_HEADER_LOG + 4:
        case PCI_ERR_HEADER_LOG + 8:
        case PCI_ERR_HEADER_LOG + 12:
        case PCI_ERR_ROOT_COMMAND:
        case PCI_ERR_ROOT_STATUS:
        case PCI_ERR_ROOT_ERR_SRC:
                *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;

        default:
                return PCI_BRIDGE_EMUL_NOT_HANDLED;
        }
}

static void
advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
                                    int reg, u32 old, u32 new, u32 mask)
{
        struct advk_pcie *pcie = bridge->data;

        switch (reg) {
        /* These are W1C registers, so clear other bits */
        case PCI_ERR_UNCOR_STATUS:
        case PCI_ERR_COR_STATUS:
        case PCI_ERR_ROOT_STATUS:
                new &= mask;
                fallthrough;

        case PCI_ERR_UNCOR_MASK:
        case PCI_ERR_UNCOR_SEVER:
        case PCI_ERR_COR_MASK:
        case PCI_ERR_CAP:
        case PCI_ERR_HEADER_LOG + 0:
        case PCI_ERR_HEADER_LOG + 4:
        case PCI_ERR_HEADER_LOG + 8:
        case PCI_ERR_HEADER_LOG + 12:
        case PCI_ERR_ROOT_COMMAND:
        case PCI_ERR_ROOT_ERR_SRC:
                advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
                break;

        default:
                break;
        }
}

static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
        .read_base = advk_pci_bridge_emul_base_conf_read,
        .write_base = advk_pci_bridge_emul_base_conf_write,
        .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
        .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
        .read_ext = advk_pci_bridge_emul_ext_conf_read,
        .write_ext = advk_pci_bridge_emul_ext_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
        struct pci_bridge_emul *bridge = &pcie->bridge;

        bridge->conf.vendor =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
        bridge->conf.device =
                cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
        bridge->conf.class_revision =
                cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

        /* Support 32-bit I/O addressing */
        bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
        bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

        /* Support 64-bit prefetchable memory */
        bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
        bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

        /* Support interrupt A for MSI feature */
        bridge->conf.intpin = PCI_INTERRUPT_INTA;

        /*
         * Aardvark HW provides the PCIe Capability structure in version 2
         * and indicates slot support, which is emulated.
         */
        bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);

        /*
         * Set Presence Detect State bit permanently since there is no support
         * for unplugging the card nor detecting whether it is plugged. (If a
         * platform exists in the future that supports it, via a GPIO for
         * example, it should be implemented via this bit.)
         *
         * Set physical slot number to 1 since there is only one port and zero
         * value is reserved for ports within the same silicon as Root Port
         * which is not our case.
         */
        bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
                                                           1));
        bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

        /* Indicate support for Completion Retry Status */
        bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);

        bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
        bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
        bridge->has_pcie = true;
        bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
        bridge->data = pcie;
        bridge->ops = &advk_pci_bridge_emul_ops;

        return pci_bridge_emul_init(bridge, 0);
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
                                   int devfn)
{
        if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
                return false;

        /*
         * If the link goes down after we check for link-up, we have a problem:
         * if a PIO request is executed while link-down, the whole controller
         * gets stuck in a non-functional state, and even after link comes up
         * again, PIO requests won't work anymore, and a reset of the whole PCIe
         * controller is needed. Therefore we need to prevent sending PIO
         * requests while the link is down.
         */
        if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
                return false;

        return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;

        /*
         * Trying to start a new PIO transfer when a previous one has not
         * completed causes an External Abort on the CPU, which results in a
         * kernel panic:
         *
         *      SError Interrupt on CPU0, code 0xbf000002 -- SError
         *      Kernel panic - not syncing: Asynchronous SError Interrupt
         *
         * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
         * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
         * concurrent calls at the same time. But because a PIO transfer may
         * take about 1.5s when the link is down or the card is disconnected,
         * advk_pcie_wait_pio() can time out before the transfer completes.
         *
         * Some versions of ARM Trusted Firmware handle this External Abort at
         * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
         * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
         */
        if (advk_readl(pcie, PIO_START)) {
                dev_err(dev, "Previous PIO read/write transfer is still running\n");
                return true;
        }

        return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 *val)
{
        struct advk_pcie *pcie = bus->sysdata;
        int retry_count;
        bool allow_crs;
        u32 reg;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_read(&pcie->bridge, where,
                                                 size, val);

        /*
         * Completion Retry Status can be returned only when reading all 4
         * bytes of the PCI_VENDOR_ID and PCI_DEVICE_ID registers at once,
         * and only when the CRSSVE flag on the Root Bridge is enabled.
         */
        allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
                    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
                     PCI_EXP_RTCTL_CRSSVE);

        if (advk_pcie_pio_is_running(pcie))
                goto try_crs;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_RD_TYPE0;
        else
                reg |= PCIE_CONFIG_RD_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Program the data strobe */
        advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        goto try_crs;

                retry_count += ret;

                /* Check PIO status and get the read result */
                ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        if (ret < 0)
                goto fail;

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 3))) & 0xffff;

        return PCIBIOS_SUCCESSFUL;

try_crs:
        /*
         * If it is possible, return Completion Retry Status so that caller
         * tries to issue the request again instead of failing.
         */
        if (allow_crs) {
                *val = CFG_RD_CRS_VAL;
                return PCIBIOS_SUCCESSFUL;
        }

fail:
        *val = 0xffffffff;
        return PCIBIOS_SET_FAILED;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                             int where, int size, u32 val)
{
        struct advk_pcie *pcie = bus->sysdata;
        u32 reg;
        u32 data_strobe = 0x0;
        int retry_count;
        int offset;
        int ret;

        if (!advk_pcie_valid_device(pcie, bus, devfn))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (pci_is_root_bus(bus))
                return pci_bridge_emul_conf_write(&pcie->bridge, where,
                                                  size, val);

        if (where % size)
                return PCIBIOS_SET_FAILED;

        if (advk_pcie_pio_is_running(pcie))
                return PCIBIOS_SET_FAILED;

        /* Program the control register */
        reg = advk_readl(pcie, PIO_CTRL);
        reg &= ~PIO_CTRL_TYPE_MASK;
        if (pci_is_root_bus(bus->parent))
                reg |= PCIE_CONFIG_WR_TYPE0;
        else
                reg |= PCIE_CONFIG_WR_TYPE1;
        advk_writel(pcie, reg, PIO_CTRL);

        /* Program the address registers */
        reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);

        /* Calculate the write strobe */
        offset = where & 0x3;
        reg = val << (8 * offset);
        data_strobe = GENMASK(size - 1, 0) << offset;

        /* Program the data register */
        advk_writel(pcie, reg, PIO_WR_DATA);

        /* Program the data strobe */
        advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

        retry_count = 0;
        do {
                /* Clear PIO DONE ISR and start the transfer */
                advk_writel(pcie, 1, PIO_ISR);
                advk_writel(pcie, 1, PIO_START);

                ret = advk_pcie_wait_pio(pcie);
                if (ret < 0)
                        return PCIBIOS_SET_FAILED;

                retry_count += ret;

                ret = advk_pcie_check_pio_status(pcie, false, NULL);
        } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

        return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
        .read = advk_pcie_rd_conf,
        .write = advk_pcie_wr_conf,
};
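
/*
 * Compose the MSI message for a vector: the doorbell address is the physical
 * address of this driver's private structure, exactly as programmed into
 * PCIE_MSI_ADDR_LOW/HIGH by advk_pcie_setup_hw(), and the payload carries
 * the vector's hwirq number (0..31).
 */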
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
                                         struct msi_msg *msg)
{
        struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
        phys_addr_t msi_addr = virt_to_phys(pcie);

        msg->address_lo = lower_32_bits(msi_addr);
        msg->address_hi = upper_32_bits(msi_addr);
        msg->data = data->hwirq;
}
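
/*
 * All MSI vectors are funneled through the controller's single summary
 * interrupt, so per-vector CPU affinity cannot be provided; returning
 * -EINVAL leaves the affinity of the parent interrupt in charge.
 */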
static int advk_msi_set_affinity(struct irq_data *irq_data,
                                 const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

static void advk_msi_irq_mask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask |= BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_irq_unmask(struct irq_data *d)
{
        struct advk_pcie *pcie = d->domain->host_data;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        u32 mask;

        raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
        mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
        mask &= ~BIT(hwirq);
        advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
        raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_top_irq_mask(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void advk_msi_top_irq_unmask(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

static struct irq_chip advk_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
        .irq_set_affinity = advk_msi_set_affinity,
        .irq_mask = advk_msi_irq_mask,
        .irq_unmask = advk_msi_irq_unmask,
};
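
/*
 * Allocate hwirqs from the 32-entry msi_used bitmap. Multi-MSI requires a
 * naturally aligned power-of-two block of vectors, hence the allocation of
 * a bitmap region of order order_base_2(nr_irqs).
 */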
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
					order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &advk_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return 0;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct advk_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->msi_used_lock);
	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};
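/*
 * Mask/unmask a legacy INTx interrupt via its assert bit in
 * PCIE_ISR1_MASK_REG, taking irq_lock for the read-modify-write.
 */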
static void advk_pcie_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip, handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static struct irq_chip advk_msi_irq_chip = {
	.name		= "advk-MSI",
	.irq_mask	= advk_msi_top_irq_mask,
	.irq_unmask	= advk_msi_top_irq_unmask,
};

static struct msi_domain_info advk_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.chip	= &advk_msi_irq_chip,
};
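/*
 * Create the two-level MSI domain: a linear inner domain covering the
 * controller's MSI_IRQ_NUM vectors and, stacked on top of it, the generic
 * PCI MSI domain advertising Multi-MSI and MSI-X support.
 */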
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	raw_spin_lock_init(&pcie->msi_irq_lock);
	mutex_init(&pcie->msi_used_lock);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(dev_fwnode(dev),
					  &advk_msi_domain_info,
					  pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}
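/*
 * Set up the INTx IRQ domain. The domain is anchored at the first child of
 * the controller's DT node (the interrupt-controller node), and the
 * irq_chip name is generated per device, presumably so multiple
 * controllers can be told apart in /proc/interrupts.
 */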
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	raw_spin_lock_init(&pcie->irq_lock);

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;
	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	of_node_put(pcie_intc_node);
	return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}
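/*
 * A minimal one-interrupt domain for the emulated Root Port itself: both
 * PME and ERR (AER) events are delivered through hwirq 0 here, since the
 * hardware reports no usable interrupt pin/vector for them.
 */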
static struct irq_chip advk_rp_irq_chip = {
	.name = "advk-RP",
};

static int advk_pcie_rp_irq_map(struct irq_domain *h,
				unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
	.map = advk_pcie_rp_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
	pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
						    &advk_pcie_rp_irq_domain_ops,
						    pcie);
	if (!pcie->rp_irq_domain) {
		dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->rp_irq_domain);
}
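/*
 * Handle a PM_PME message: latch the requester ID into the emulated Root
 * Status register and, if PME interrupts are enabled in Root Control,
 * forward the event to the Root Port IRQ domain.
 */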
static void advk_pcie_handle_pme(struct advk_pcie *pcie)
{
	u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;

	advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);

	/*
	 * PCIE_MSG_LOG_REG contains the last inbound message, so store
	 * the requester ID only when PME was not asserted yet.
	 * Also do not trigger PME interrupt when PME is still asserted.
	 */
	if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
		pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);

		/*
		 * Trigger PME interrupt only if PMEIE bit in Root Control is set.
		 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
		 */
		if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
			return;

		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
	}
}
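/*
 * Demultiplex pending MSIs: ack each set status bit and forward it to the
 * inner MSI domain, then clear the summary MSI bit in ISR0.
 */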
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}
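/*
 * Dispatch all controller events from ISR0/ISR1 in a fixed order: PME
 * first (so the requester ID in PCIE_MSG_LOG_REG is not overwritten by a
 * later message), then ERR, MSI, and finally legacy INTx.
 */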
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	/* Process the PME interrupt first so as not to miss the PME requester ID */
	if (isr0_status & PCIE_MSG_PM_PME_MASK)
		advk_pcie_handle_pme(pcie);

	/* Process ERR interrupt */
	if (isr0_status & PCIE_ISR0_ERR_MASK) {
		advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);

		/*
		 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
		 * PCIe interrupt 0
		 */
		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
					    (char)i + 'A');
	}
}
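/*
 * Top-level handler for the controller's summary interrupt line; it claims
 * the interrupt only when the core-interrupt bit is set, as the line is
 * registered as shared.
 */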
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct advk_pcie *pcie = dev->bus->sysdata;

	/*
	 * The emulated root bridge has its own emulated irq chip and irq
	 * domain. The pin argument is the INTx pin (1=INTA, 2=INTB, 3=INTC,
	 * 4=INTD) while the hwirq for irq_create_mapping() is indexed from
	 * zero.
	 */
	if (pci_is_root_bus(dev->bus))
		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
	else
		return of_irq_parse_and_map_pci(dev, slot, pin);
}
static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}
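/*
 * Bring the PHY up in the usual init -> set_mode(PCIE) -> power_on
 * sequence, unwinding with phy_exit() on failure. A missing PHY is treated
 * as success since old DT bindings do not describe one.
 */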
static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	return 0;
}
static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old DT bindings do not provide the PHY phandle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}
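/*
 * Probe order: carve the bridge windows into power-of-two outbound
 * windows, map registers and request the summary IRQ, then bring up the
 * PHY and hardware, register the emulated root bridge, create the INTx,
 * MSI and Root Port IRQ domains, and finally probe the host bridge.
 */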
static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	resource_list_for_each_entry(entry, &bridge->windows) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * The Aardvark hardware can also map PCIe windows for type 0
		 * and type 1 config accesses, but this driver issues all
		 * configuration transfers via PIO, which does not use the
		 * PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. The default outbound
		 * access configuration is transparent memory access, so such
		 * resources need no window configuration.
		 */
		if (type == IORESOURCE_MEM && entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by the tuple (match,
		 * remap, mask), and an access to address A uses this window
		 * if A matches 'match' under the given mask. Therefore every
		 * PCIe window size must be a power of two and every start
		 * address must be aligned to the window size. The minimum
		 * size is 64 KiB because the lower 16 bits of the mask must
		 * be zero. The remapped address may only have bits set that
		 * are also set in the mask.
		 */
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size)-1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
						       "reset-gpios", 0,
						       GPIOD_OUT_LOW,
						       "pcie1-reset");
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret == -ENOENT) {
			pcie->reset_gpio = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get reset-gpio: %i\n",
					ret);
			return ret;
		}
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize INTx IRQ domain\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize MSI IRQ domain\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	ret = advk_pcie_init_rp_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize Root Port IRQ domain\n");
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;
	bridge->map_irq = advk_pcie_map_irq;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_rp_irq_domain(pcie);
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}
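/*
 * Teardown mirrors probe in reverse: remove the PCI bus, quiesce and mask
 * the controller, drop the IRQ domains and the emulated bridge, assert
 * PERST#, disable link training and outbound windows, and power off the
 * PHY.
 */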
static int advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 val;
	int i;

	/* Remove PCI bus with all devices */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);

	/* Disable MSI */
	val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);

	/* Clear MSI address */
	advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);

	/* Mask all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Remove IRQ domains */
	advk_pcie_remove_rp_irq_domain(pcie);
	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	/* Free config space for emulated root bridge */
	pci_bridge_emul_cleanup(&pcie->bridge);

	/* Assert the PERST# signal, which prepares the PCIe card for power down */
	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 1);

	/* Disable link training */
	val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	val &= ~LINK_TRAINING_EN;
	advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);

	/* Disable outbound address windows mapping */
	for (i = 0; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	/* Disable phy */
	advk_pcie_disable_phy(pcie);

	return 0;
}
static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");