pci-mvebu.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * PCIe driver for Marvell Armada 370 and Armada XP SoCs
  4. *
  5. * Author: Thomas Petazzoni <[email protected]>
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/pci.h>
  10. #include <linux/bitfield.h>
  11. #include <linux/clk.h>
  12. #include <linux/delay.h>
  13. #include <linux/gpio.h>
  14. #include <linux/init.h>
  15. #include <linux/mbus.h>
  16. #include <linux/slab.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/of_address.h>
  19. #include <linux/of_irq.h>
  20. #include <linux/of_gpio.h>
  21. #include <linux/of_pci.h>
  22. #include <linux/of_platform.h>
  23. #include "../pci.h"
  24. #include "../pci-bridge-emul.h"
  25. /*
  26. * PCIe unit register offsets.
  27. */
  28. #define PCIE_DEV_ID_OFF 0x0000
  29. #define PCIE_CMD_OFF 0x0004
  30. #define PCIE_DEV_REV_OFF 0x0008
  31. #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
  32. #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
  33. #define PCIE_SSDEV_ID_OFF 0x002c
  34. #define PCIE_CAP_PCIEXP 0x0060
  35. #define PCIE_CAP_PCIERR_OFF 0x0100
  36. #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
  37. #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
  38. #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
  39. #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
  40. #define PCIE_WIN5_CTRL_OFF 0x1880
  41. #define PCIE_WIN5_BASE_OFF 0x1884
  42. #define PCIE_WIN5_REMAP_OFF 0x188c
  43. #define PCIE_CONF_ADDR_OFF 0x18f8
  44. #define PCIE_CONF_ADDR_EN 0x80000000
  45. #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
  46. #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
  47. #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
  48. #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
  49. #define PCIE_CONF_ADDR(bus, devfn, where) \
  50. (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
  51. PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
  52. PCIE_CONF_ADDR_EN)
  53. #define PCIE_CONF_DATA_OFF 0x18fc
  54. #define PCIE_INT_CAUSE_OFF 0x1900
  55. #define PCIE_INT_UNMASK_OFF 0x1910
  56. #define PCIE_INT_INTX(i) BIT(24+i)
  57. #define PCIE_INT_PM_PME BIT(28)
  58. #define PCIE_INT_ALL_MASK GENMASK(31, 0)
  59. #define PCIE_CTRL_OFF 0x1a00
  60. #define PCIE_CTRL_X1_MODE 0x0001
  61. #define PCIE_CTRL_RC_MODE BIT(1)
  62. #define PCIE_CTRL_MASTER_HOT_RESET BIT(24)
  63. #define PCIE_STAT_OFF 0x1a04
  64. #define PCIE_STAT_BUS 0xff00
  65. #define PCIE_STAT_DEV 0x1f0000
  66. #define PCIE_STAT_LINK_DOWN BIT(0)
  67. #define PCIE_SSPL_OFF 0x1a0c
  68. #define PCIE_SSPL_VALUE_SHIFT 0
  69. #define PCIE_SSPL_VALUE_MASK GENMASK(7, 0)
  70. #define PCIE_SSPL_SCALE_SHIFT 8
  71. #define PCIE_SSPL_SCALE_MASK GENMASK(9, 8)
  72. #define PCIE_SSPL_ENABLE BIT(16)
  73. #define PCIE_RC_RTSTA 0x1a14
  74. #define PCIE_DEBUG_CTRL 0x1a60
  75. #define PCIE_DEBUG_SOFT_RESET BIT(20)
  76. struct mvebu_pcie_port;
  77. /* Structure representing all PCIe interfaces */
/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;	/* parent platform device */
	struct mvebu_pcie_port *ports;	/* array of nports per-port states */
	struct resource io;		/* PCI I/O aperture (CPU physical) */
	struct resource realio;		/* NOTE(review): unused in this chunk; presumably remapped I/O range — confirm against rest of file */
	struct resource mem;		/* PCI memory aperture */
	struct resource busn;		/* bus number range; not referenced in this chunk */
	int nports;			/* number of entries in ports[] */
};
/* One decoded MBus address window (memory or I/O) owned by a port. */
struct mvebu_pcie_window {
	phys_addr_t base;	/* CPU physical base address of the window */
	phys_addr_t remap;	/* remap target, or MVEBU_MBUS_NO_REMAP */
	size_t size;		/* size in bytes; 0 means window disabled */
};
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;			/* human-readable port name */
	void __iomem *base;		/* mapped per-port register block */
	u32 port;			/* port index — presumably from DT; confirm */
	u32 lane;			/* lane index — presumably from DT; confirm */
	bool is_x4;			/* true: program Link Capability for 4 lanes */
	int devfn;			/* device/function this port responds to */
	unsigned int mem_target;	/* MBus target ID for the memory window */
	unsigned int mem_attr;		/* MBus attribute for the memory window */
	unsigned int io_target;		/* MBus target for I/O window, -1 if none */
	unsigned int io_attr;		/* MBus attribute for I/O window, -1 if none */
	struct clk *clk;		/* port clock */
	struct gpio_desc *reset_gpio;	/* optional PERST# GPIO */
	char *reset_name;		/* label used for the reset GPIO */
	struct pci_bridge_emul bridge;	/* emulated PCI-to-PCI bridge config space */
	struct device_node *dn;		/* DT node of this port */
	struct mvebu_pcie *pcie;	/* back-pointer to the controller */
	struct mvebu_pcie_window memwin; /* currently programmed memory window */
	struct mvebu_pcie_window iowin;	/* currently programmed I/O window */
	u32 saved_pcie_stat;		/* NOTE(review): presumably PCIE_STAT_OFF saved across suspend — confirm */
	struct resource regs;		/* physical range of the register block (BAR0 points here) */
	u8 slot_power_limit_value;	/* SSPL value from DT; 0 = not specified */
	u8 slot_power_limit_scale;	/* SSPL scale from DT */
	struct irq_domain *intx_irq_domain; /* INTx domain when "intx" IRQ is in DT */
	raw_spinlock_t irq_lock;	/* protects INTx mask registers — confirm scope */
	int intx_irq;			/* "intx" interrupt from DT, <= 0 if absent */
};
/* Write a 32-bit value to a per-port register at offset @reg. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
/* Read a 32-bit per-port register at offset @reg. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
/*
 * True when the port has an I/O window configured. io_target/io_attr are
 * unsigned, so the -1 sentinel comparison relies on the usual arithmetic
 * conversion of -1 to UINT_MAX — intentional, matches how they are set.
 */
static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}
/* Link is up when the LINK_DOWN bit in the status register is clear. */
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
  136. static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
  137. {
  138. return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
  139. }
  140. static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
  141. {
  142. u32 stat;
  143. stat = mvebu_readl(port, PCIE_STAT_OFF);
  144. stat &= ~PCIE_STAT_BUS;
  145. stat |= nr << 8;
  146. mvebu_writel(port, stat, PCIE_STAT_OFF);
  147. }
  148. static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
  149. {
  150. u32 stat;
  151. stat = mvebu_readl(port, PCIE_STAT_OFF);
  152. stat &= ~PCIE_STAT_DEV;
  153. stat |= nr << 16;
  154. mvebu_writel(port, stat, PCIE_STAT_OFF);
  155. }
/* Clear all three BARs and all six address-decode windows of the port. */
static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
	int i;

	/* BAR[0] has no control register; just clear its address. */
	mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));

	/* BAR[1] and BAR[2]: disable (control = 0) and clear addresses. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	/* Windows 0-4 share one register layout... */
	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	/* ...window 5 has dedicated registers. */
	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
}
  175. /*
  176. * Setup PCIE BARs and Address Decode Wins:
  177. * BAR[0] -> internal registers (needed for MSI)
  178. * BAR[1] -> covers all DRAM banks
  179. * BAR[2] -> Disabled
  180. * WIN[0-3] -> DRAM bank[0-3]
  181. */
/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0] -> internal registers (needed for MSI)
 * BAR[1] -> covers all DRAM banks
 * BAR[2] -> Disabled
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* Base/size are 64K-aligned; low bits carry attr/target/enable. */
		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	/* BAR control: size mask in the high bits, bit 0 enables the BAR. */
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
/*
 * One-time hardware initialization of a port: switch to Root Complex mode,
 * fix up link width and class code, program DRAM decode windows, configure
 * the slot power limit message and mask/clear all interrupt sources.
 */
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link
	 * Capability register. This register is defined by PCIe specification
	 * as read-only but this mvebu controller has it as read-write and must
	 * be set to number of SerDes PCIe lanes (1 or 4). If this register is
	 * not set correctly then link with endpoint card is not established.
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x6004)
	 * because default value is Memory controller (0x5080).
	 *
	 * Note that this mvebu PCI Bridge does not have compliant Type 1
	 * Configuration Space. Header Type is reported as Type 0 and it
	 * has format of Type 0 config space.
	 *
	 * Moreover Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
	 * have the same format in Marvell's specification as in PCIe
	 * specification, but their meaning is totally different and they do
	 * different things: they are aliased into internal mvebu registers
	 * (e.g. PCIE_BAR_LO_OFF) and these should not be changed or
	 * reconfigured by pci device drivers.
	 *
	 * Therefore driver uses emulation of PCI Bridge which emulates
	 * access to configuration space via internal mvebu registers or
	 * emulated configuration buffer. Driver access these PCI Bridge
	 * directly for simplification, but these registers can be accessed
	 * also via standard mvebu way for accessing PCI config space.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program Root Port to automatically send Set_Slot_Power_Limit
	 * PCIe Message when changing status from Dl_Down to Dl_Up and valid
	 * slot power limit was specified.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/* Check if "intx" interrupt was specified in DT. */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback code when "intx" interrupt was not specified in DT:
	 * Unmask all legacy INTx interrupts as driver does not provide a way
	 * for masking and unmasking of individual legacy INTx interrupts.
	 * Legacy INTx are reported via one shared GIC source and therefore
	 * kernel cannot distinguish which individual legacy INTx was triggered.
	 * These interrupts are shared, so it should not cause any issue. Just
	 * performance penalty as every PCIe interrupt handler needs to be
	 * called when some interrupt is triggered.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
  302. static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
  303. struct pci_bus *bus,
  304. int devfn);
  305. static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  306. int size, u32 *val)
  307. {
  308. struct mvebu_pcie *pcie = bus->sysdata;
  309. struct mvebu_pcie_port *port;
  310. void __iomem *conf_data;
  311. port = mvebu_pcie_find_port(pcie, bus, devfn);
  312. if (!port)
  313. return PCIBIOS_DEVICE_NOT_FOUND;
  314. if (!mvebu_pcie_link_up(port))
  315. return PCIBIOS_DEVICE_NOT_FOUND;
  316. conf_data = port->base + PCIE_CONF_DATA_OFF;
  317. mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
  318. PCIE_CONF_ADDR_OFF);
  319. switch (size) {
  320. case 1:
  321. *val = readb_relaxed(conf_data + (where & 3));
  322. break;
  323. case 2:
  324. *val = readw_relaxed(conf_data + (where & 2));
  325. break;
  326. case 4:
  327. *val = readl_relaxed(conf_data);
  328. break;
  329. default:
  330. return PCIBIOS_BAD_REGISTER_NUMBER;
  331. }
  332. return PCIBIOS_SUCCESSFUL;
  333. }
  334. static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
  335. int where, int size, u32 val)
  336. {
  337. struct mvebu_pcie *pcie = bus->sysdata;
  338. struct mvebu_pcie_port *port;
  339. void __iomem *conf_data;
  340. port = mvebu_pcie_find_port(pcie, bus, devfn);
  341. if (!port)
  342. return PCIBIOS_DEVICE_NOT_FOUND;
  343. if (!mvebu_pcie_link_up(port))
  344. return PCIBIOS_DEVICE_NOT_FOUND;
  345. conf_data = port->base + PCIE_CONF_DATA_OFF;
  346. mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
  347. PCIE_CONF_ADDR_OFF);
  348. switch (size) {
  349. case 1:
  350. writeb(val, conf_data + (where & 3));
  351. break;
  352. case 2:
  353. writew(val, conf_data + (where & 2));
  354. break;
  355. case 4:
  356. writel(val, conf_data);
  357. break;
  358. default:
  359. return PCIBIOS_BAD_REGISTER_NUMBER;
  360. }
  361. return PCIBIOS_SUCCESSFUL;
  362. }
/* Accessors for config space of devices behind the emulated root bridge. */
static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};
  367. /*
  368. * Remove windows, starting from the largest ones to the smallest
  369. * ones.
  370. */
  371. static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
  372. phys_addr_t base, size_t size)
  373. {
  374. while (size) {
  375. size_t sz = 1 << (fls(size) - 1);
  376. mvebu_mbus_del_window(base, sz);
  377. base += sz;
  378. size -= sz;
  379. }
  380. }
  381. /*
  382. * MBus windows can only have a power of two size, but PCI BARs do not
  383. * have this constraint. Therefore, we have to split the PCI BAR into
  384. * areas each having a power of two size. We start from the largest
  385. * one (i.e highest order bit set in the size).
  386. */
/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 *
 * On failure, every window created so far is rolled back. Returns 0 on
 * success or the mvebu_mbus error code.
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Undo the windows already mapped (base has advanced). */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap target in lock-step with the base address. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}
/*
 * Reconcile the currently programmed window (*cur) with *desired: a no-op
 * when they match, otherwise tear down the old window and (if desired->size
 * is non-zero) create the new one. *cur tracks what is actually programmed.
 */
static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		/* Nothing is mapped now; reflect that in the cached state. */
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}
/*
 * Re-program the MBus I/O window from the emulated bridge's I/O base/limit
 * registers. An invalid (disabled) range tears the window down, since
 * 'desired' stays zero-sized.
 */
static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(le16_to_cpu(conf->iobaseupper) << 16);
	desired.base = port->pcie->io.start + desired.remap;
	/* Limit register granularity is 4K, hence the 0xFFF fill below. */
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (le16_to_cpu(conf->iolimitupper) << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}
/*
 * Re-program the MBus memory window from the emulated bridge's memory
 * base/limit registers. An invalid (disabled) range tears the window down.
 */
static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
	/* Limit granularity is 1M, hence the 0xFFFFF fill below. */
	desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}
/*
 * Read hook for the emulated bridge's Type 0/1 header: registers that have
 * hardware backing are read (fully or partially) from HW; anything else is
 * left to the generic emulation (NOT_HANDLED).
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * secondary bus number which is mvebu local bus number.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
		 * Other bits are retrieved only from emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Read hook for the emulated bridge's PCIe capability: most registers map
 * 1:1 onto the HW capability at PCIE_CAP_PCIEXP; a few need fixups to look
 * spec-compliant. Unlisted registers fall back to the generic emulation.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires that the Clock Power Management capability bit
		 * is hard-wired to zero for downstream ports but HW returns 1.
		 * Additionally enable Data Link Layer Link Active Reporting
		 * Capable bit as DL_Active indication is provided too.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* DL_Active indication is provided via PCIE_STAT_OFF */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;
		/*
		 * When slot power limit was not specified in DT then
		 * ASPL_DISABLE bit is stored only in emulated config space.
		 * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
		/* This callback is 32-bit and in high bits is slot status. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Read hook for the extended (AER) capability: the listed registers are
 * forwarded verbatim to the HW error capability block at PCIE_CAP_PCIERR_OFF.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* Reading the capability header (reg 0) also comes from HW. */
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
/*
 * Write hook for the emulated bridge's header registers. The generic
 * emulation has already merged @new into the config buffer; here the
 * side effects (HW registers, MBus windows) are applied. On window-setup
 * failure the emulated base/limit registers are rewritten to a disabled
 * range (base > limit) so software sees the range as off.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* On error disable mem range */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* On error disable IO range */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		/* Only the secondary bus number (bits [15:8]) is HW-backed. */
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		/* Bridge Control lives in the high 16 bits of this dword. */
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}
/*
 * Write hook for the emulated bridge's PCIe capability registers:
 * propagate writes into the HW capability block, applying the fixups
 * required for spec-compliant behaviour.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * PCIe requires that the Enable Clock Power Management bit
		 * is hard-wired to zero for downstream ports but HW allows
		 * to change it.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * Allow to change PCIE_SSPL_ENABLE bit only when slot power
		 * limit was specified in DT and configured into HW.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status bit in Root Status Register (PCIE_RC_RTSTA)
		 * is read-only and can be cleared only by writing 0b to the
		 * Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF). So
		 * clear PME via Interrupt Cause.
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}
/*
 * Write callback for the emulated bridge's extended (AER) capability
 * registers: mirror them into the port's hardware AER block.
 */
static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		/* HW mirrors these registers 1:1 at PCIE_CAP_PCIERR_OFF. */
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}
/* Callbacks plugged into the generic emulated PCI bridge driver. */
static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 *
 * Returns the result of pci_bridge_emul_init() (0 on success,
 * negative errno on failure).
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	/* Read identification and capability info from the port's HW. */
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 * Enable slot support which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently as there is no support for
	 * unplugging PCIe card from the slot. Assume that PCIe card is always
	 * connected in slot.
	 *
	 * Set physical slot number to port+1 as mvebu ports are indexed from
	 * zero and zero value is reserved for ports within the same silicon
	 * as Root Port which is not mvebu case.
	 *
	 * Also set correct slot power limit.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CAP_PCIEXP;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}
/* Retrieve the driver-private mvebu_pcie from ARM pci_sys_data. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
  828. static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
  829. struct pci_bus *bus,
  830. int devfn)
  831. {
  832. int i;
  833. for (i = 0; i < pcie->nports; i++) {
  834. struct mvebu_pcie_port *port = &pcie->ports[i];
  835. if (!port->base)
  836. continue;
  837. if (bus->number == 0 && port->devfn == devfn)
  838. return port;
  839. if (bus->number != 0 &&
  840. bus->number >= port->bridge.conf.secondary_bus &&
  841. bus->number <= port->bridge.conf.subordinate_bus)
  842. return port;
  843. }
  844. return NULL;
  845. }
  846. /* PCI configuration space write function */
  847. static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  848. int where, int size, u32 val)
  849. {
  850. struct mvebu_pcie *pcie = bus->sysdata;
  851. struct mvebu_pcie_port *port;
  852. port = mvebu_pcie_find_port(pcie, bus, devfn);
  853. if (!port)
  854. return PCIBIOS_DEVICE_NOT_FOUND;
  855. return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
  856. }
  857. /* PCI configuration space read function */
  858. static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  859. int size, u32 *val)
  860. {
  861. struct mvebu_pcie *pcie = bus->sysdata;
  862. struct mvebu_pcie_port *port;
  863. port = mvebu_pcie_find_port(pcie, bus, devfn);
  864. if (!port)
  865. return PCIBIOS_DEVICE_NOT_FOUND;
  866. return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
  867. }
/* Config accessors for the virtual root bus (emulated bridges). */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
  872. static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
  873. {
  874. struct mvebu_pcie_port *port = d->domain->host_data;
  875. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  876. unsigned long flags;
  877. u32 unmask;
  878. raw_spin_lock_irqsave(&port->irq_lock, flags);
  879. unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
  880. unmask &= ~PCIE_INT_INTX(hwirq);
  881. mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
  882. raw_spin_unlock_irqrestore(&port->irq_lock, flags);
  883. }
  884. static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
  885. {
  886. struct mvebu_pcie_port *port = d->domain->host_data;
  887. irq_hw_number_t hwirq = irqd_to_hwirq(d);
  888. unsigned long flags;
  889. u32 unmask;
  890. raw_spin_lock_irqsave(&port->irq_lock, flags);
  891. unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
  892. unmask |= PCIE_INT_INTX(hwirq);
  893. mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
  894. raw_spin_unlock_irqrestore(&port->irq_lock, flags);
  895. }
/* irq_chip backing the per-port legacy INTx interrupt domain. */
static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};
  901. static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
  902. unsigned int virq, irq_hw_number_t hwirq)
  903. {
  904. struct mvebu_pcie_port *port = h->host_data;
  905. irq_set_status_flags(virq, IRQ_LEVEL);
  906. irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
  907. irq_set_chip_data(virq, port);
  908. return 0;
  909. }
/* One-cell DT interrupt specifier -> INTx hwirq mapping. */
static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
/*
 * Create the per-port linear INTx IRQ domain from the port's
 * interrupt-controller child node. Returns 0 on success, -ENODEV when
 * the DT child node is missing, -ENOMEM when domain creation fails.
 */
static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
{
	struct device *dev = &port->pcie->pdev->dev;
	struct device_node *pcie_intc_node;

	raw_spin_lock_init(&port->irq_lock);

	/* The interrupt controller is the port node's first child. */
	pcie_intc_node = of_get_next_child(port->dn, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
		return -ENODEV;
	}

	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						      &mvebu_pcie_intx_irq_domain_ops,
						      port);
	/* Domain holds its own reference; drop ours in either case. */
	of_node_put(pcie_intc_node);
	if (!port->intx_irq_domain) {
		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Chained handler for the port's summary interrupt: demultiplex the
 * pending, unmasked INTx causes into the per-port INTx domain.
 */
static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	/* Only causes that are currently unmasked are serviced. */
	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	/* Process legacy INTx interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}
  954. static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
  955. {
  956. /* Interrupt support on mvebu emulated bridges is not implemented yet */
  957. if (dev->bus->number == 0)
  958. return 0; /* Proper return code 0 == NO_IRQ */
  959. return of_irq_parse_and_map_pci(dev, slot, pin);
  960. }
  961. static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
  962. const struct resource *res,
  963. resource_size_t start,
  964. resource_size_t size,
  965. resource_size_t align)
  966. {
  967. if (dev->bus->number != 0)
  968. return start;
  969. /*
  970. * On the PCI-to-PCI bridge side, the I/O windows must have at
  971. * least a 64 KB size and the memory windows must have at
  972. * least a 1 MB size. Moreover, MBus windows need to have a
  973. * base address aligned on their size, and their size must be
  974. * a power of two. This means that if the BAR doesn't have a
  975. * power of two size, several MBus windows will actually be
  976. * created. We need to ensure that the biggest MBus window
  977. * (which will be the first one) is aligned on its size, which
  978. * explains the rounddown_pow_of_two() being done here.
  979. */
  980. if (res->flags & IORESOURCE_IO)
  981. return round_up(start, max_t(resource_size_t, SZ_64K,
  982. rounddown_pow_of_two(size)));
  983. else if (res->flags & IORESOURCE_MEM)
  984. return round_up(start, max_t(resource_size_t, SZ_1M,
  985. rounddown_pow_of_two(size)));
  986. else
  987. return start;
  988. }
  989. static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
  990. struct device_node *np,
  991. struct mvebu_pcie_port *port)
  992. {
  993. int ret = 0;
  994. ret = of_address_to_resource(np, 0, &port->regs);
  995. if (ret)
  996. return (void __iomem *)ERR_PTR(ret);
  997. return devm_ioremap_resource(&pdev->dev, &port->regs);
  998. }
/* Decoding helpers for the custom mvebu "ranges" encoding. */
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
#define DT_TYPE_IO 0x1
#define DT_TYPE_MEM32 0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)

/*
 * Walk the controller node's "ranges" property and extract the MBus
 * target/attribute pair for the window of the given @type (IO or MEM)
 * that belongs to the slot addressed by @devfn.
 *
 * Returns 0 on success, -EINVAL when "ranges" is absent, -ENOENT when
 * no matching entry exists; *tgt/*attr are preset to -1.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI address cells (na) and size cells (ns) of the child space. */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			/* Target/attr are packed into the CPU address. */
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
  1039. static int mvebu_pcie_suspend(struct device *dev)
  1040. {
  1041. struct mvebu_pcie *pcie;
  1042. int i;
  1043. pcie = dev_get_drvdata(dev);
  1044. for (i = 0; i < pcie->nports; i++) {
  1045. struct mvebu_pcie_port *port = pcie->ports + i;
  1046. if (!port->base)
  1047. continue;
  1048. port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
  1049. }
  1050. return 0;
  1051. }
  1052. static int mvebu_pcie_resume(struct device *dev)
  1053. {
  1054. struct mvebu_pcie *pcie;
  1055. int i;
  1056. pcie = dev_get_drvdata(dev);
  1057. for (i = 0; i < pcie->nports; i++) {
  1058. struct mvebu_pcie_port *port = pcie->ports + i;
  1059. if (!port->base)
  1060. continue;
  1061. mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
  1062. mvebu_pcie_setup_hw(port);
  1063. }
  1064. return 0;
  1065. }
/* devm action callback: release the port clock taken in parse_port(). */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
/*
 * Parse one DT child node of the controller into @port.
 *
 * Returns 1 when the port is usable, 0 when the port must be skipped
 * (non-fatal DT problems; allocations made so far are released), or a
 * negative errno on fatal errors (allocation failure, probe deferral).
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	u32 slot_power_limit;
	int reset_gpio, ret;
	u32 num_lanes;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* Lane defaults to 0 when the property is absent. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* An IO window is optional; -1 marks "no IO window". */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * Old DT bindings do not contain "intx" interrupt
	 * so do not fail probing driver when interrupt does not exist.
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		/* Request the GPIO with PERST# asserted (card in reset). */
		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	slot_power_limit = of_pci_get_slot_power_limit(child,
				&port->slot_power_limit_value,
				&port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Arrange for the clock reference to be dropped automatically. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 *
 * Returns 0 on success or the clk_prepare_enable() errno.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		/* Default reset delay; DT may override via reset-delay-us. */
		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		/* Let the refclk settle before deasserting PERST#. */
		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
/*
 * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
 * so we need extra resource setup parsing our special DT properties encoding
 * the MEM and IO apertures.
 *
 * Returns 0 on success or a negative errno.
 */
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	/* Get the PCIe memory aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	pcie->mem.name = "PCI MEM";
	pci_add_resource(&bridge->windows, &pcie->mem);
	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
	if (ret)
		return ret;

	/* Get the PCIe IO aperture; an empty aperture means no IO support. */
	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		/* realio is the CPU-visible window remapped into IO space. */
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT - SZ_64K,
					 resource_size(&pcie->io) - 1);
		pcie->realio.name = "PCI I/O";

		ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
		if (ret)
			return ret;

		pci_add_resource(&bridge->windows, &pcie->realio);
		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Probe the mvebu PCIe controller: parse all DT child ports, power them
 * up, map their registers, set up the emulated bridge, the optional
 * INTx domain and the HW windows, then register the host bridge.
 * Ports that fail a non-fatal step are disabled (port->base == NULL)
 * rather than failing the whole probe.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse DT; only parsed-OK ports get a dn and count. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: bring up the HW of every successfully parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * PCIe topology exported by mvebu hw is quite complicated. In
		 * reality has something like N fully independent host bridges
		 * where each host bridge has one PCIe Root Port (which acts as
		 * PCI Bridge device). Each host bridge has its own independent
		 * internal registers, independent access to PCI config space,
		 * independent interrupt lines, independent window and memory
		 * access configuration. But additionally there is some kind of
		 * peer-to-peer support between PCIe devices behind different
		 * host bridges limited just to forwarding of memory and I/O
		 * transactions (forwarding of error messages and config cycles
		 * is not supported). So we could say there are N independent
		 * PCIe Root Complexes.
		 *
		 * For this kind of setup DT should have been structured into
		 * N independent PCIe controllers / host bridges. But instead
		 * structure in past was defined to put PCIe Root Ports of all
		 * host bridges into one bus zero, like in classic multi-port
		 * Root Complex setup with just one host bridge.
		 *
		 * This means that pci-mvebu.c driver provides "virtual" bus 0
		 * on which registers all PCIe Root Ports (PCI Bridge devices)
		 * specified in DT by their BDF addresses and virtually routes
		 * PCI config access of each PCI bridge device to specific PCIe
		 * host bridge.
		 *
		 * Normally PCI Bridge should choose between Type 0 and Type 1
		 * config requests based on primary and secondary bus numbers
		 * configured on the bridge itself. But because mvebu PCI Bridge
		 * does not have registers for primary and secondary bus numbers
		 * in its config space, it determines type of config requests
		 * via its own custom way.
		 *
		 * There are two options how mvebu determines type of config
		 * request.
		 *
		 * 1. If Secondary Bus Number Enable bit is not set or is not
		 * available (applies for pre-XP PCIe controllers) then Type 0
		 * is used if target bus number equals Local Bus Number (bits
		 * [15:8] in register 0x1a04) and target device number differs
		 * from Local Device Number (bits [20:16] in register 0x1a04).
		 * Type 1 is used if target bus number differs from Local Bus
		 * Number. And when target bus number equals Local Bus Number
		 * and target device equals Local Device Number then request is
		 * routed to Local PCI Bridge (PCIe Root Port).
		 *
		 * 2. If Secondary Bus Number Enable bit is set (bit 7 in
		 * register 0x1a2c) then mvebu hw determines type of config
		 * request like compliant PCI Bridge based on primary bus number
		 * which is configured via Local Bus Number (bits [15:8] in
		 * register 0x1a04) and secondary bus number which is configured
		 * via Secondary Bus Number (bits [7:0] in register 0x1a2c).
		 * Local PCI Bridge (PCIe Root Port) is available on primary bus
		 * as device with Local Device Number (bits [20:16] in register
		 * 0x1a04).
		 *
		 * Secondary Bus Number Enable bit is disabled by default and
		 * option 2. is not available on pre-XP PCIe controllers. Hence
		 * this driver always use option 1.
		 *
		 * Basically it means that primary and secondary buses shares
		 * one virtual number configured via Local Bus Number bits and
		 * Local Device Number bits determine if accessing primary
		 * or secondary bus. Set Local Device Number to 1 and redirect
		 * all writes of PCI Bridge Secondary Bus Number register to
		 * Local Bus Number (bits [15:8] in register 0x1a04).
		 *
		 * So when accessing devices on buses behind secondary bus
		 * number it would work correctly. And also when accessing
		 * device 0 at secondary bus number via config space would be
		 * correctly routed to secondary bus. Due to issues described
		 * in mvebu_pcie_setup_hw(), PCI Bridges at primary bus (zero)
		 * are not accessed directly via PCI config space but rather
		 * indirectly via kernel emulated PCI bridge driver.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}
/*
 * Tear down the controller: unregister the PCI bus, then quiesce every
 * active port (interrupts, emulated bridge, SSPL message, windows) and
 * finally power it down. Ordering within the loop is significant.
 */
static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove PCI bus with all devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		if (!port->base)
			continue;

		/* Disable Root Bridge I/O space, memory space and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove IRQ domains. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free config space for emulated root bridge. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable sending Set_Slot_Power_Limit PCIe Message. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable and clear BARs and windows. */
		mvebu_pcie_disable_wins(port);

		/* Delete PCIe IO and MEM windows. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Power down card and disable clocks. Must be the last step. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}
/* DT compatibles for the supported mvebu SoC families. */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
/* Suspend/resume run in the noirq phase (see mvebu_pcie_suspend/resume). */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
/* Platform driver glue and module metadata. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);

MODULE_AUTHOR("Thomas Petazzoni <[email protected]>");
MODULE_AUTHOR("Pali Rohár <[email protected]>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");