pcie-brcmstb.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /* Copyright (C) 2009 - 2019 Broadcom */
  3. #include <linux/bitfield.h>
  4. #include <linux/bitops.h>
  5. #include <linux/clk.h>
  6. #include <linux/compiler.h>
  7. #include <linux/delay.h>
  8. #include <linux/init.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/io.h>
  11. #include <linux/ioport.h>
  12. #include <linux/irqchip/chained_irq.h>
  13. #include <linux/irqdomain.h>
  14. #include <linux/kernel.h>
  15. #include <linux/list.h>
  16. #include <linux/log2.h>
  17. #include <linux/module.h>
  18. #include <linux/msi.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of_irq.h>
  21. #include <linux/of_pci.h>
  22. #include <linux/of_platform.h>
  23. #include <linux/pci.h>
  24. #include <linux/pci-ecam.h>
  25. #include <linux/printk.h>
  26. #include <linux/regulator/consumer.h>
  27. #include <linux/reset.h>
  28. #include <linux/sizes.h>
  29. #include <linux/slab.h>
  30. #include <linux/string.h>
  31. #include <linux/types.h>
  32. #include "../pci.h"
  33. /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
  34. #define BRCM_PCIE_CAP_REGS 0x00ac
  35. /* Broadcom STB PCIe Register Offsets */
  36. #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
  37. #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
  38. #define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
  39. #define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
  40. #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
  41. #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
  42. #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
  43. #define PCIE_RC_DL_MDIO_ADDR 0x1100
  44. #define PCIE_RC_DL_MDIO_WR_DATA 0x1104
  45. #define PCIE_RC_DL_MDIO_RD_DATA 0x1108
  46. #define PCIE_MISC_MISC_CTRL 0x4008
  47. #define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
  48. #define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
  49. #define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
  50. #define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000
  51. #define PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK 0x07c00000
  52. #define PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK 0x0000001f
  53. #define SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
  54. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
  55. #define PCIE_MEM_WIN0_LO(win) \
  56. PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
  57. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010
  58. #define PCIE_MEM_WIN0_HI(win) \
  59. PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
  60. #define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
  61. #define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
  62. #define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
  63. #define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
  64. #define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
  65. #define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
  66. #define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
  67. #define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
  68. #define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
  69. #define PCIE_MISC_MSI_DATA_CONFIG 0x404c
  70. #define PCIE_MISC_MSI_DATA_CONFIG_VAL_32 0xffe06540
  71. #define PCIE_MISC_MSI_DATA_CONFIG_VAL_8 0xfff86540
  72. #define PCIE_MISC_PCIE_CTRL 0x4064
  73. #define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1
  74. #define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK 0x4
  75. #define PCIE_MISC_PCIE_STATUS 0x4068
  76. #define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80
  77. #define PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK 0x20
  78. #define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10
  79. #define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40
  80. #define PCIE_MISC_REVISION 0x406c
  81. #define BRCM_PCIE_HW_REV_33 0x0303
  82. #define BRCM_PCIE_HW_REV_3_20 0x0320
  83. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
  84. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
  85. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0
  86. #define PCIE_MEM_WIN0_BASE_LIMIT(win) \
  87. PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
  88. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI 0x4080
  89. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK 0xff
  90. #define PCIE_MEM_WIN0_BASE_HI(win) \
  91. PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
  92. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI 0x4084
  93. #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK 0xff
  94. #define PCIE_MEM_WIN0_LIMIT_HI(win) \
  95. PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
  96. #define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
  97. #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
  98. #define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
  99. #define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
  100. #define PCIE_INTR2_CPU_BASE 0x4300
  101. #define PCIE_MSI_INTR2_BASE 0x4500
  102. /* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
  103. #define MSI_INT_STATUS 0x0
  104. #define MSI_INT_CLR 0x8
  105. #define MSI_INT_MASK_SET 0x10
  106. #define MSI_INT_MASK_CLR 0x14
  107. #define PCIE_EXT_CFG_DATA 0x8000
  108. #define PCIE_EXT_CFG_INDEX 0x9000
  109. #define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
  110. #define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
  111. #define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
  112. #define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
  113. #define RGR1_SW_INIT_1_INIT_7278_MASK 0x1
  114. #define RGR1_SW_INIT_1_INIT_7278_SHIFT 0x0
  115. /* PCIe parameters */
  116. #define BRCM_NUM_PCIE_OUT_WINS 0x4
  117. #define BRCM_INT_PCI_MSI_NR 32
  118. #define BRCM_INT_PCI_MSI_LEGACY_NR 8
  119. #define BRCM_INT_PCI_MSI_SHIFT 0
  120. #define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
  121. #define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \
  122. 32 - BRCM_INT_PCI_MSI_LEGACY_NR)
  123. /* MSI target addresses */
  124. #define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
  125. #define BRCM_MSI_TARGET_ADDR_GT_4GB 0xffffffffcULL
  126. /* MDIO registers */
  127. #define MDIO_PORT0 0x0
  128. #define MDIO_DATA_MASK 0x7fffffff
  129. #define MDIO_PORT_MASK 0xf0000
  130. #define MDIO_REGAD_MASK 0xffff
  131. #define MDIO_CMD_MASK 0xfff00000
  132. #define MDIO_CMD_READ 0x1
  133. #define MDIO_CMD_WRITE 0x0
  134. #define MDIO_DATA_DONE_MASK 0x80000000
  135. #define MDIO_RD_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
  136. #define MDIO_WT_DONE(x) (((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
  137. #define SSC_REGS_ADDR 0x1100
  138. #define SET_ADDR_OFFSET 0x1f
  139. #define SSC_CNTL_OFFSET 0x2
  140. #define SSC_CNTL_OVRD_EN_MASK 0x8000
  141. #define SSC_CNTL_OVRD_VAL_MASK 0x4000
  142. #define SSC_STATUS_OFFSET 0x1
  143. #define SSC_STATUS_SSC_MASK 0x400
  144. #define SSC_STATUS_PLL_LOCK_MASK 0x800
  145. #define PCIE_BRCM_MAX_MEMC 3
  146. #define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
  147. #define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
  148. #define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
  149. /* Rescal registers */
  150. #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
  151. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS 0x3
  152. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK 0x4
  153. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT 0x2
  154. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK 0x2
  155. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT 0x1
  156. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK 0x1
  157. #define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT 0x0
  158. /* Forward declarations */
/* Forward declarations */
struct brcm_pcie;

/* Indices into pcie_cfg_data::offsets (see IDX_ADDR/DATA_ADDR macros) */
enum {
	RGR1_SW_INIT_1,
	EXT_CFG_INDEX,
	EXT_CFG_DATA,
};

/* Indices for the per-SoC RGR1_SW_INIT_1 mask/shift pair */
enum {
	RGR1_SW_INIT_1_INIT_MASK,
	RGR1_SW_INIT_1_INIT_SHIFT,
};

/* Supported SoC families; selects register layout and helper callbacks */
enum pcie_type {
	GENERIC,
	BCM7425,
	BCM7435,
	BCM4908,
	BCM7278,
	BCM2711,
};

/* Per-SoC match data: register offsets plus SoC-specific reset helpers */
struct pcie_cfg_data {
	const int *offsets;	/* indexed by the reg-offset enum above */
	const enum pcie_type type;
	/* Drive PERST# (val != 0 asserts reset) */
	void (*perst_set)(struct brcm_pcie *pcie, u32 val);
	/* Drive the bridge soft-init bit (val != 0 holds in reset) */
	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};

/* Regulators powering a downstream device; trailing flexible array */
struct subdev_regulators {
	unsigned int num_supplies;
	struct regulator_bulk_data supplies[];
};

/* State for the controller's built-in MSI interrupt handling */
struct brcm_msi {
	struct device *dev;
	void __iomem *base;	/* controller register base */
	struct device_node *np;
	struct irq_domain *msi_domain;
	struct irq_domain *inner_domain;
	struct mutex lock; /* guards the alloc/free operations */
	u64 target_addr;	/* PCIe address MSI writes are aimed at */
	int irq;		/* parent (chained) interrupt */
	DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
	bool legacy;		/* pre-rev-3.3 hardware layout */
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int legacy_shift;
	int nr; /* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem *intr_base;
};

/* Internal PCIe Host Controller Information.*/
struct brcm_pcie {
	struct device *dev;
	void __iomem *base;	/* controller register base */
	struct clk *clk;
	struct device_node *np;
	bool ssc;		/* spread-spectrum clocking requested */
	int gen;		/* link speed cap (0 = no limit) */
	u64 msi_target_addr;
	struct brcm_msi *msi;
	const int *reg_offsets;	/* per-SoC register locations */
	enum pcie_type type;
	struct reset_control *rescal;
	struct reset_control *perst_reset;
	int num_memc;		/* number of memory controllers in use */
	u64 memc_size[PCIE_BRCM_MAX_MEMC];
	u32 hw_rev;
	void (*perst_set)(struct brcm_pcie *pcie, u32 val);
	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
	struct subdev_regulators *sr;
	bool ep_wakeup_capable;
};
  226. static inline bool is_bmips(const struct brcm_pcie *pcie)
  227. {
  228. return pcie->type == BCM7435 || pcie->type == BCM7425;
  229. }
  230. /*
  231. * This is to convert the size of the inbound "BAR" region to the
  232. * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
  233. */
  234. static int brcm_pcie_encode_ibar_size(u64 size)
  235. {
  236. int log2_in = ilog2(size);
  237. if (log2_in >= 12 && log2_in <= 15)
  238. /* Covers 4KB to 32KB (inclusive) */
  239. return (log2_in - 12) + 0x1c;
  240. else if (log2_in >= 16 && log2_in <= 35)
  241. /* Covers 64KB to 32GB, (inclusive) */
  242. return log2_in - 15;
  243. /* Something is awry so disable */
  244. return 0;
  245. }
  246. static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
  247. {
  248. u32 pkt = 0;
  249. pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
  250. pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
  251. pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
  252. return pkt;
  253. }
  254. /* negative return value indicates error */
  255. static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
  256. {
  257. int tries;
  258. u32 data;
  259. writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
  260. base + PCIE_RC_DL_MDIO_ADDR);
  261. readl(base + PCIE_RC_DL_MDIO_ADDR);
  262. data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
  263. for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
  264. udelay(10);
  265. data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
  266. }
  267. *val = FIELD_GET(MDIO_DATA_MASK, data);
  268. return MDIO_RD_DONE(data) ? 0 : -EIO;
  269. }
  270. /* negative return value indicates error */
  271. static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
  272. u8 regad, u16 wrdata)
  273. {
  274. int tries;
  275. u32 data;
  276. writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
  277. base + PCIE_RC_DL_MDIO_ADDR);
  278. readl(base + PCIE_RC_DL_MDIO_ADDR);
  279. writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
  280. data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
  281. for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
  282. udelay(10);
  283. data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
  284. }
  285. return MDIO_WT_DONE(data) ? 0 : -EIO;
  286. }
/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	/* Select the SSC register bank via the MDIO address window */
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	/* Read-modify-write the SSC control: enable the override and set it */
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	/* Give the PLL time to settle before checking status */
	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	/* Success only if SSC is active AND the PLL has locked */
	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
	return ssc && pll ? 0 : -EIO;
}
/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
	/* The RC's PCIe capability block sits at BRCM_PCIE_CAP_REGS */
	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
	u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	/* Cap the advertised Supported Link Speed in LNKCAP */
	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
	writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);

	/* 0xf is the Target Link Speed field of Link Control 2 */
	lnkctl2 = (lnkctl2 & ~0xf) | gen;
	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
/*
 * Program outbound memory window @win: CPU range [cpu_addr, cpu_addr+size)
 * is translated to PCIe addresses starting at @pcie_addr.  Base/limit are
 * programmed in units of 1MB, split across low and high registers.
 */
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       unsigned int win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	/* BMIPS chips stop here; they have no high base/limit registers */
	if (is_bmips(pcie))
		return;

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
/* Top-level MSI irq_chip seen by PCI drivers; ack is forwarded to parent */
static struct irq_chip brcm_msi_irq_chip = {
	.name = "BRCM STB PCIe MSI",
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info brcm_msi_domain_info = {
	/* Multi MSI is supported by the controller, but not by this driver */
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip = &brcm_msi_irq_chip,
};
  376. static void brcm_pcie_msi_isr(struct irq_desc *desc)
  377. {
  378. struct irq_chip *chip = irq_desc_get_chip(desc);
  379. unsigned long status;
  380. struct brcm_msi *msi;
  381. struct device *dev;
  382. u32 bit;
  383. chained_irq_enter(chip, desc);
  384. msi = irq_desc_get_handler_data(desc);
  385. dev = msi->dev;
  386. status = readl(msi->intr_base + MSI_INT_STATUS);
  387. status >>= msi->legacy_shift;
  388. for_each_set_bit(bit, &status, msi->nr) {
  389. int ret;
  390. ret = generic_handle_domain_irq(msi->inner_domain, bit);
  391. if (ret)
  392. dev_dbg(dev, "unexpected MSI\n");
  393. }
  394. chained_irq_exit(chip, desc);
  395. }
  396. static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
  397. {
  398. struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
  399. msg->address_lo = lower_32_bits(msi->target_addr);
  400. msg->address_hi = upper_32_bits(msi->target_addr);
  401. msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
  402. }
/* Affinity follows the single chained parent interrupt; not settable here. */
static int brcm_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
  408. static void brcm_msi_ack_irq(struct irq_data *data)
  409. {
  410. struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
  411. const int shift_amt = data->hwirq + msi->legacy_shift;
  412. writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
  413. }
/* Inner-domain irq_chip: composes messages and acks hardware status bits */
static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name = "BRCM STB MSI",
	.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
	.irq_set_affinity = brcm_msi_set_affinity,
	.irq_ack = brcm_msi_ack_irq,
};
/* Reserve one free MSI vector; returns the hwirq or a negative errno. */
static int brcm_msi_alloc(struct brcm_msi *msi)
{
	int hwirq;

	/* msi->lock guards the 'used' bitmap */
	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
	mutex_unlock(&msi->lock);

	return hwirq;
}
/* Return vector @hwirq to the free pool. */
static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
	/* msi->lock guards the 'used' bitmap */
	mutex_lock(&msi->lock);
	bitmap_release_region(msi->used, hwirq, 0);
	mutex_unlock(&msi->lock);
}
  434. static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
  435. unsigned int nr_irqs, void *args)
  436. {
  437. struct brcm_msi *msi = domain->host_data;
  438. int hwirq;
  439. hwirq = brcm_msi_alloc(msi);
  440. if (hwirq < 0)
  441. return hwirq;
  442. irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
  443. &brcm_msi_bottom_irq_chip, domain->host_data,
  444. handle_edge_irq, NULL, NULL);
  445. return 0;
  446. }
  447. static void brcm_irq_domain_free(struct irq_domain *domain,
  448. unsigned int virq, unsigned int nr_irqs)
  449. {
  450. struct irq_data *d = irq_domain_get_irq_data(domain, virq);
  451. struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
  452. brcm_msi_free(msi, d->hwirq);
  453. }
/* Ops for the inner (hwirq-level) MSI domain */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = brcm_irq_domain_alloc,
	.free = brcm_irq_domain_free,
};
  458. static int brcm_allocate_domains(struct brcm_msi *msi)
  459. {
  460. struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
  461. struct device *dev = msi->dev;
  462. msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
  463. if (!msi->inner_domain) {
  464. dev_err(dev, "failed to create IRQ domain\n");
  465. return -ENOMEM;
  466. }
  467. msi->msi_domain = pci_msi_create_irq_domain(fwnode,
  468. &brcm_msi_domain_info,
  469. msi->inner_domain);
  470. if (!msi->msi_domain) {
  471. dev_err(dev, "failed to create MSI domain\n");
  472. irq_domain_remove(msi->inner_domain);
  473. return -ENOMEM;
  474. }
  475. return 0;
  476. }
/* Tear down both MSI domains, outer first. */
static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->inner_domain);
}
/* Detach the chained handler and free the MSI domains, if MSI was set up. */
static void brcm_msi_remove(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi = pcie->msi;

	if (!msi)
		return;
	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
	brcm_free_domains(msi);
}
/* Program the controller's MSI registers: unmask, clear, target, data. */
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	/* Legacy chips use only the top 8 bits of the shared register */
	u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
				BRCM_INT_PCI_MSI_MASK;

	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
	writel(val, msi->intr_base + MSI_INT_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	/* Magic data value the endpoint must write to raise an MSI */
	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}
/*
 * Set up MSI support: map the parent interrupt, allocate per-controller
 * MSI state, create the IRQ domains, and program the hardware.
 * Returns 0 on success or a negative errno.
 */
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi;
	int irq, ret;
	struct device *dev = pcie->dev;

	/* Interrupt index 1 in the DT node is the MSI interrupt */
	irq = irq_of_parse_and_map(dev->of_node, 1);
	if (irq <= 0) {
		dev_err(dev, "cannot map MSI interrupt\n");
		return -ENODEV;
	}

	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	mutex_init(&msi->lock);
	msi->dev = dev;
	msi->base = pcie->base;
	msi->np = pcie->np;
	msi->target_addr = pcie->msi_target_addr;
	msi->irq = irq;
	/* Pre-3.3 hardware uses the shared INTR2 register layout */
	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;

	/*
	 * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
	 * is large enough.
	 */
	BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);

	if (msi->legacy) {
		msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
		/* MSIs occupy bits [31..24] of the shared register */
		msi->legacy_shift = 24;
	} else {
		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
		msi->nr = BRCM_INT_PCI_MSI_NR;
		msi->legacy_shift = 0;
	}

	ret = brcm_allocate_domains(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);

	brcm_msi_set_regs(msi);
	pcie->msi = msi;

	return 0;
}
  549. /* The controller is capable of serving in both RC and EP roles */
  550. static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
  551. {
  552. void __iomem *base = pcie->base;
  553. u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
  554. return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
  555. }
  556. static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
  557. {
  558. u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
  559. u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
  560. u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
  561. return dla && plu;
  562. }
  563. static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
  564. unsigned int devfn, int where)
  565. {
  566. struct brcm_pcie *pcie = bus->sysdata;
  567. void __iomem *base = pcie->base;
  568. int idx;
  569. /* Accesses to the RC go right to the RC registers if !devfn */
  570. if (pci_is_root_bus(bus))
  571. return devfn ? NULL : base + PCIE_ECAM_REG(where);
  572. /* An access to our HW w/o link-up will cause a CPU Abort */
  573. if (!brcm_pcie_link_up(pcie))
  574. return NULL;
  575. /* For devices, write to the config space index register */
  576. idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
  577. writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
  578. return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
  579. }
  580. static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
  581. unsigned int devfn, int where)
  582. {
  583. struct brcm_pcie *pcie = bus->sysdata;
  584. void __iomem *base = pcie->base;
  585. int idx;
  586. /* Accesses to the RC go right to the RC registers if !devfn */
  587. if (pci_is_root_bus(bus))
  588. return devfn ? NULL : base + PCIE_ECAM_REG(where);
  589. /* An access to our HW w/o link-up will cause a CPU Abort */
  590. if (!brcm_pcie_link_up(pcie))
  591. return NULL;
  592. /* For devices, write to the config space index register */
  593. idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
  594. writel(idx, base + IDX_ADDR(pcie));
  595. return base + DATA_ADDR(pcie);
  596. }
  597. static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
  598. {
  599. u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
  600. u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
  601. tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  602. tmp = (tmp & ~mask) | ((val << shift) & mask);
  603. writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  604. }
  605. static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
  606. {
  607. u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
  608. u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
  609. tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  610. tmp = (tmp & ~mask) | ((val << shift) & mask);
  611. writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  612. }
/* On 4908 PERST# is driven through a dedicated reset controller. */
static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
	if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
		return;

	/* val != 0 asserts PERST# (device held in reset) */
	if (val)
		reset_control_assert(pcie->perst_reset);
	else
		reset_control_deassert(pcie->perst_reset);
}
  622. static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
  623. {
  624. u32 tmp;
  625. /* Perst bit has moved and assert value is 0 */
  626. tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
  627. u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
  628. writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
  629. }
  630. static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
  631. {
  632. u32 tmp;
  633. tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  634. u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
  635. writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
  636. }
/*
 * Derive the size and PCIe-space offset of the inbound window (RC_BAR2)
 * that must cover all of system memory, from the bridge's dma-ranges and
 * the optional "brcm,scb-sizes" DT property.
 *
 * @pcie: driver state
 * @rc_bar2_size: out - inbound window size (rounded up to a power of two)
 * @rc_bar2_offset: out - PCIe-space address at which the window starts
 *
 * Returns 0 on success, -EINVAL if dma-ranges are absent or describe a
 * layout the controller cannot represent.
 */
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
							u64 *rc_bar2_size,
							u64 *rc_bar2_offset)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	struct resource_entry *entry;
	struct device *dev = pcie->dev;
	u64 lowest_pcie_addr = ~(u64)0;
	int ret, i = 0;
	u64 size = 0;

	/* Sum all dma-ranges and track the lowest PCIe-space address used */
	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		u64 pcie_beg = entry->res->start - entry->offset;

		size += entry->res->end - entry->res->start + 1;
		if (pcie_beg < lowest_pcie_addr)
			lowest_pcie_addr = pcie_beg;
	}

	/* Sentinel unchanged => the loop never ran, i.e. no dma-ranges */
	if (lowest_pcie_addr == ~(u64)0) {
		dev_err(dev, "DT node has no dma-ranges\n");
		return -EINVAL;
	}

	/* Optional explicit per-memory-controller sizes from DT */
	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
						  PCIE_BRCM_MAX_MEMC);

	if (ret <= 0) {
		/* Make an educated guess */
		pcie->num_memc = 1;
		pcie->memc_size[0] = 1ULL << fls64(size - 1);
	} else {
		pcie->num_memc = ret;
	}

	/* Each memc is viewed through a "port" that is a power of 2 */
	for (i = 0, size = 0; i < pcie->num_memc; i++)
		size += pcie->memc_size[i];

	/* System memory starts at this address in PCIe-space */
	*rc_bar2_offset = lowest_pcie_addr;

	/* The sum of all memc views must also be a power of 2 */
	*rc_bar2_size = 1ULL << fls64(size - 1);

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue on
	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard to debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory. In
	 * addition, its size mut be a power of two. To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size. If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 * region in the first 4GB of pcie-space, as some legacy devices can
	 * only address 32bits. We would also like to put the MSI under 4GB
	 * as well, since some devices require a 32bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 * region at location 0 (since we have to allow some space for
	 * outbound memory @ 3GB). So instead it will start at the 1x
	 * multiple of its size
	 */
	if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
	    (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
		dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
			*rc_bar2_size, *rc_bar2_offset);
		return -EINVAL;
	}

	return 0;
}
/*
 * Core RC hardware bring-up: bridge reset cycle, SerDes power-up, burst
 * size, inbound (RC_BAR2) and outbound windows, MSI target selection,
 * ASPM advertisement, bridge class code and BAR endian mode.
 *
 * Called at probe and again on resume; returns 0 or a negative errno.
 */
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	u64 rc_bar2_offset, rc_bar2_size;
	void __iomem *base = pcie->base;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 tmp, burst, aspm_support;
	int num_out_wins = 0;
	int ret, memc;

	/* Reset the bridge */
	pcie->bridge_sw_init_set(pcie, 1);
	usleep_range(100, 200);

	/* Take the bridge out of reset */
	pcie->bridge_sw_init_set(pcie, 0);

	/* Power up the SerDes by clearing its IDDQ bit (register layout
	 * differs on the BMIPS parts) */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	if (is_bmips(pcie))
		tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	else
		tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two bit field. For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (is_bmips(pcie))
		burst = 0x1; /* 256 bytes */
	else if (pcie->type == BCM2711)
		burst = 0x0; /* 128 bytes */
	else if (pcie->type == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/* Work out the inbound window that must cover system memory */
	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
						    &rc_bar2_offset);
	if (ret)
		return ret;

	/* Program RC_BAR2; the low word also carries the encoded size */
	tmp = lower_32_bits(rc_bar2_offset);
	u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
			  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
	writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
	writel(upper_32_bits(rc_bar2_offset),
	       base + PCIE_MISC_RC_BAR2_CONFIG_HI);

	/* Advertise each memory controller's view size (log2(size) - 15) */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* Sanity check: the controller must have strapped as a Root Complex */
	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
		return -EINVAL;
	}

	/* disable the PCIe->GISB memory window (RC_BAR1) */
	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);

	/* disable the PCIe->SCB memory window (RC_BAR3) */
	tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
			  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	/* Program an outbound window for every bridge MEM resource */
	bridge = pci_host_bridge_from_priv(pcie);
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		if (is_bmips(pcie)) {
			u64 start = res->start;
			unsigned int j, nwins = resource_size(res) / SZ_128M;

			/* bmips PCIe outbound windows have a 128MB max size */
			if (nwins > BRCM_NUM_PCIE_OUT_WINS)
				nwins = BRCM_NUM_PCIE_OUT_WINS;
			for (j = 0; j < nwins; j++, start += SZ_128M)
				brcm_pcie_set_outbound_win(pcie, j, start,
							   start - entry->offset,
							   SZ_128M);
			/* bmips consumes all windows in one go; stop here */
			break;
		}
		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* PCIe->SCB endian mode for BAR */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
			  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	return 0;
}
/*
 * Release PERST#, wait (up to 100 ms) for the link to train, apply the
 * optional generation limit and SSC mode, report the negotiated link,
 * and enable CLKREQ# refclk gating.
 *
 * Returns 0 on link-up, -ENODEV if the link never comes up.
 */
static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
	struct device *dev = pcie->dev;
	void __iomem *base = pcie->base;
	u16 nlw, cls, lnksta;
	bool ssc_good = false;
	u32 tmp;
	int ret, i;

	/* Unassert the fundamental reset */
	pcie->perst_set(pcie, 0);

	/*
	 * Give the RC/EP time to wake up, before trying to configure RC.
	 * Intermittently check status for link-up, up to a total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	/* Optional DT-requested maximum link speed */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Optional spread-spectrum clocking; failure is non-fatal */
	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	/* Report negotiated speed and width from the Link Status register */
	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	/*
	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
	 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	return 0;
}
/* Power supplies a downstream PCIe device may declare in its DT node */
static const char * const supplies[] = {
	"vpcie3v3",
	"vpcie3v3aux",
	"vpcie12v",
};
  897. static void *alloc_subdev_regulators(struct device *dev)
  898. {
  899. const size_t size = sizeof(struct subdev_regulators) +
  900. sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
  901. struct subdev_regulators *sr;
  902. int i;
  903. sr = devm_kzalloc(dev, size, GFP_KERNEL);
  904. if (sr) {
  905. sr->num_supplies = ARRAY_SIZE(supplies);
  906. for (i = 0; i < ARRAY_SIZE(supplies); i++)
  907. sr->supplies[i].supply = supplies[i];
  908. }
  909. return sr;
  910. }
  911. static int brcm_pcie_add_bus(struct pci_bus *bus)
  912. {
  913. struct brcm_pcie *pcie = bus->sysdata;
  914. struct device *dev = &bus->dev;
  915. struct subdev_regulators *sr;
  916. int ret;
  917. if (!bus->parent || !pci_is_root_bus(bus->parent))
  918. return 0;
  919. if (dev->of_node) {
  920. sr = alloc_subdev_regulators(dev);
  921. if (!sr) {
  922. dev_info(dev, "Can't allocate regulators for downstream device\n");
  923. goto no_regulators;
  924. }
  925. pcie->sr = sr;
  926. ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
  927. if (ret) {
  928. dev_info(dev, "No regulators for downstream device\n");
  929. goto no_regulators;
  930. }
  931. ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
  932. if (ret) {
  933. dev_err(dev, "Can't enable regulators for downstream device\n");
  934. regulator_bulk_free(sr->num_supplies, sr->supplies);
  935. pcie->sr = NULL;
  936. }
  937. }
  938. no_regulators:
  939. brcm_pcie_start_link(pcie);
  940. return 0;
  941. }
  942. static void brcm_pcie_remove_bus(struct pci_bus *bus)
  943. {
  944. struct brcm_pcie *pcie = bus->sysdata;
  945. struct subdev_regulators *sr = pcie->sr;
  946. struct device *dev = &bus->dev;
  947. if (!sr)
  948. return;
  949. if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
  950. dev_err(dev, "Failed to disable regulators for downstream device\n");
  951. regulator_bulk_free(sr->num_supplies, sr->supplies);
  952. pcie->sr = NULL;
  953. }
  954. /* L23 is a low-power PCIe link state */
  955. static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
  956. {
  957. void __iomem *base = pcie->base;
  958. int l23, i;
  959. u32 tmp;
  960. /* Assert request for L23 */
  961. tmp = readl(base + PCIE_MISC_PCIE_CTRL);
  962. u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
  963. writel(tmp, base + PCIE_MISC_PCIE_CTRL);
  964. /* Wait up to 36 msec for L23 */
  965. tmp = readl(base + PCIE_MISC_PCIE_STATUS);
  966. l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
  967. for (i = 0; i < 15 && !l23; i++) {
  968. usleep_range(2000, 2400);
  969. tmp = readl(base + PCIE_MISC_PCIE_STATUS);
  970. l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
  971. tmp);
  972. }
  973. if (!l23)
  974. dev_err(pcie->dev, "failed to enter low-power link state\n");
  975. }
/*
 * Start (start=1) or stop (start=0) the PHY by stepping its
 * PWRDN / RESET / DIG_RESET control fields one at a time.  The fields
 * are walked forward when starting and in reverse when stopping, with a
 * settle delay after each write, then verified in a single read-back.
 *
 * Returns 0 on success, -EIO if the read-back doesn't match.
 */
static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
	/* Iterate fields forward on start, in reverse on stop */
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		/* Set the field's bit when starting, clear it when stopping */
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		/* Let the field settle before touching the next one */
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	/* Verify all fields ended up in the requested state */
	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;
	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}
  1007. static inline int brcm_phy_start(struct brcm_pcie *pcie)
  1008. {
  1009. return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
  1010. }
  1011. static inline int brcm_phy_stop(struct brcm_pcie *pcie)
  1012. {
  1013. return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
  1014. }
  1015. static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
  1016. {
  1017. void __iomem *base = pcie->base;
  1018. int tmp;
  1019. if (brcm_pcie_link_up(pcie))
  1020. brcm_pcie_enter_l23(pcie);
  1021. /* Assert fundamental reset */
  1022. pcie->perst_set(pcie, 1);
  1023. /* Deassert request for L23 in case it was asserted */
  1024. tmp = readl(base + PCIE_MISC_PCIE_CTRL);
  1025. u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
  1026. writel(tmp, base + PCIE_MISC_PCIE_CTRL);
  1027. /* Turn off SerDes */
  1028. tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
  1029. u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
  1030. writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
  1031. /* Shutdown PCIe bridge */
  1032. pcie->bridge_sw_init_set(pcie, 1);
  1033. }
  1034. static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
  1035. {
  1036. bool *ret = data;
  1037. if (device_may_wakeup(&dev->dev)) {
  1038. *ret = true;
  1039. dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
  1040. }
  1041. return (int) *ret;
  1042. }
/*
 * noirq suspend: quiesce the controller, stop the PHY, rearm the rescal
 * reset, turn off downstream regulators (unless a downstream device is a
 * wake-up source), and gate the clock.
 *
 * Returns 0 on success or a negative errno.
 */
static int brcm_pcie_suspend_noirq(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret;

	brcm_pcie_turn_off(pcie);

	/*
	 * If brcm_phy_stop() returns an error, just dev_err(). If we
	 * return the error it will cause the suspend to fail and this is a
	 * forgivable offense that will probably be erased on resume.
	 */
	if (brcm_phy_stop(pcie))
		dev_err(dev, "Could not stop phy for suspend\n");

	ret = reset_control_rearm(pcie->rescal);
	if (ret) {
		dev_err(dev, "Could not rearm rescal reset\n");
		return ret;
	}

	if (pcie->sr) {
		/*
		 * Now turn off the regulators, but if at least one
		 * downstream device is enabled as a wake-up source, do not
		 * turn off regulators.
		 */
		pcie->ep_wakeup_capable = false;
		pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
			     &pcie->ep_wakeup_capable);
		if (!pcie->ep_wakeup_capable) {
			ret = regulator_bulk_disable(pcie->sr->num_supplies,
						     pcie->sr->supplies);
			if (ret) {
				dev_err(dev, "Could not turn off regulators\n");
				/* Undo the rearm so resume finds a live rescal */
				reset_control_reset(pcie->rescal);
				return ret;
			}
		}
	}

	clk_disable_unprepare(pcie->clk);

	return 0;
}
/*
 * noirq resume: undo brcm_pcie_suspend_noirq() in reverse order - clock,
 * rescal reset, PHY, bridge/SerDes, full controller setup, regulators
 * (only if they were actually disabled at suspend), link, and MSI regs.
 *
 * Returns 0 on success; on failure unwinds via the goto chain.
 */
static int brcm_pcie_resume_noirq(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	void __iomem *base;
	u32 tmp;
	int ret;

	base = pcie->base;
	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return ret;

	ret = reset_control_reset(pcie->rescal);
	if (ret)
		goto err_disable_clk;

	ret = brcm_phy_start(pcie);
	if (ret)
		goto err_reset;

	/* Take bridge out of reset so we can access the SERDES reg */
	pcie->bridge_sw_init_set(pcie, 0);

	/* SERDES_IDDQ = 0 */
	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);

	/* wait for serdes to be stable */
	udelay(100);

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto err_reset;

	if (pcie->sr) {
		if (pcie->ep_wakeup_capable) {
			/*
			 * We are resuming from a suspend.  In the suspend we
			 * did not disable the power supplies, so there is
			 * no need to enable them (and falsely increase their
			 * usage count).
			 */
			pcie->ep_wakeup_capable = false;
		} else {
			ret = regulator_bulk_enable(pcie->sr->num_supplies,
						    pcie->sr->supplies);
			if (ret) {
				dev_err(dev, "Could not turn on regulators\n");
				goto err_reset;
			}
		}
	}

	ret = brcm_pcie_start_link(pcie);
	if (ret)
		goto err_regulator;

	/* Restore MSI controller registers lost across suspend */
	if (pcie->msi)
		brcm_msi_set_regs(pcie->msi);

	return 0;

err_regulator:
	if (pcie->sr)
		regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
	reset_control_rearm(pcie->rescal);
err_disable_clk:
	clk_disable_unprepare(pcie->clk);
	return ret;
}
  1143. static void __brcm_pcie_remove(struct brcm_pcie *pcie)
  1144. {
  1145. brcm_msi_remove(pcie);
  1146. brcm_pcie_turn_off(pcie);
  1147. if (brcm_phy_stop(pcie))
  1148. dev_err(pcie->dev, "Could not stop phy\n");
  1149. if (reset_control_rearm(pcie->rescal))
  1150. dev_err(pcie->dev, "Could not rearm rescal reset\n");
  1151. clk_disable_unprepare(pcie->clk);
  1152. }
  1153. static int brcm_pcie_remove(struct platform_device *pdev)
  1154. {
  1155. struct brcm_pcie *pcie = platform_get_drvdata(pdev);
  1156. struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
  1157. pci_stop_root_bus(bridge->bus);
  1158. pci_remove_root_bus(bridge->bus);
  1159. __brcm_pcie_remove(pcie);
  1160. return 0;
  1161. }
/* Register offsets for the generic (and most derived) chips */
static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1] = 0x9210,
	[EXT_CFG_INDEX]  = 0x9000,
	[EXT_CFG_DATA]   = 0x9004,
};

/* Register offsets for the BMIPS-based BCM7425 */
static const int pcie_offsets_bmips_7425[] = {
	[RGR1_SW_INIT_1] = 0x8010,
	[EXT_CFG_INDEX]  = 0x8300,
	[EXT_CFG_DATA]   = 0x8304,
};

/* Per-SoC configuration: register offsets plus PERST#/SW_INIT accessors */
static const struct pcie_cfg_data generic_cfg = {
	.offsets	= pcie_offsets,
	.type		= GENERIC,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

static const struct pcie_cfg_data bcm7425_cfg = {
	.offsets	= pcie_offsets_bmips_7425,
	.type		= BCM7425,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

static const struct pcie_cfg_data bcm7435_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM7435,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

/* BCM4908 uses a dedicated reset controller for PERST# */
static const struct pcie_cfg_data bcm4908_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM4908,
	.perst_set	= brcm_pcie_perst_set_4908,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

/* BCM7278 relocated RGR1_SW_INIT_1 and its PERST#/SW_INIT fields */
static const int pcie_offset_bcm7278[] = {
	[RGR1_SW_INIT_1] = 0xc010,
	[EXT_CFG_INDEX]  = 0x9000,
	[EXT_CFG_DATA]   = 0x9004,
};

static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets	= pcie_offset_bcm7278,
	.type		= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
};

static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets	= pcie_offsets,
	.type		= BCM2711,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};

/* DT compatible strings -> per-SoC configuration */
static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
	{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
	{},
};
/* Config accessors for most chips (native-width config reads/writes) */
static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};

/* BCM7425 variant: 32-bit-only config accesses via its own map_bus */
static struct pci_ops brcm7425_pcie_ops = {
	.map_bus = brcm7425_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};
  1238. static int brcm_pcie_probe(struct platform_device *pdev)
  1239. {
  1240. struct device_node *np = pdev->dev.of_node, *msi_np;
  1241. struct pci_host_bridge *bridge;
  1242. const struct pcie_cfg_data *data;
  1243. struct brcm_pcie *pcie;
  1244. int ret;
  1245. bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
  1246. if (!bridge)
  1247. return -ENOMEM;
  1248. data = of_device_get_match_data(&pdev->dev);
  1249. if (!data) {
  1250. pr_err("failed to look up compatible string\n");
  1251. return -EINVAL;
  1252. }
  1253. pcie = pci_host_bridge_priv(bridge);
  1254. pcie->dev = &pdev->dev;
  1255. pcie->np = np;
  1256. pcie->reg_offsets = data->offsets;
  1257. pcie->type = data->type;
  1258. pcie->perst_set = data->perst_set;
  1259. pcie->bridge_sw_init_set = data->bridge_sw_init_set;
  1260. pcie->base = devm_platform_ioremap_resource(pdev, 0);
  1261. if (IS_ERR(pcie->base))
  1262. return PTR_ERR(pcie->base);
  1263. pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
  1264. if (IS_ERR(pcie->clk))
  1265. return PTR_ERR(pcie->clk);
  1266. ret = of_pci_get_max_link_speed(np);
  1267. pcie->gen = (ret < 0) ? 0 : ret;
  1268. pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
  1269. ret = clk_prepare_enable(pcie->clk);
  1270. if (ret) {
  1271. dev_err(&pdev->dev, "could not enable clock\n");
  1272. return ret;
  1273. }
  1274. pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
  1275. if (IS_ERR(pcie->rescal)) {
  1276. clk_disable_unprepare(pcie->clk);
  1277. return PTR_ERR(pcie->rescal);
  1278. }
  1279. pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
  1280. if (IS_ERR(pcie->perst_reset)) {
  1281. clk_disable_unprepare(pcie->clk);
  1282. return PTR_ERR(pcie->perst_reset);
  1283. }
  1284. ret = reset_control_reset(pcie->rescal);
  1285. if (ret)
  1286. dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
  1287. ret = brcm_phy_start(pcie);
  1288. if (ret) {
  1289. reset_control_rearm(pcie->rescal);
  1290. clk_disable_unprepare(pcie->clk);
  1291. return ret;
  1292. }
  1293. ret = brcm_pcie_setup(pcie);
  1294. if (ret)
  1295. goto fail;
  1296. pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
  1297. if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
  1298. dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
  1299. ret = -ENODEV;
  1300. goto fail;
  1301. }
  1302. msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
  1303. if (pci_msi_enabled() && msi_np == pcie->np) {
  1304. ret = brcm_pcie_enable_msi(pcie);
  1305. if (ret) {
  1306. dev_err(pcie->dev, "probe of internal MSI failed");
  1307. goto fail;
  1308. }
  1309. }
  1310. bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
  1311. bridge->sysdata = pcie;
  1312. platform_set_drvdata(pdev, pcie);
  1313. ret = pci_host_probe(bridge);
  1314. if (!ret && !brcm_pcie_link_up(pcie))
  1315. ret = -ENODEV;
  1316. if (ret) {
  1317. brcm_pcie_remove(pdev);
  1318. return ret;
  1319. }
  1320. return 0;
  1321. fail:
  1322. __brcm_pcie_remove(pcie);
  1323. return ret;
  1324. }
MODULE_DEVICE_TABLE(of, brcm_pcie_match);

/* noirq callbacks: run after device IRQs are disabled / before re-enabled */
static const struct dev_pm_ops brcm_pcie_pm_ops = {
	.suspend_noirq = brcm_pcie_suspend_noirq,
	.resume_noirq = brcm_pcie_resume_noirq,
};

static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
		.pm = &brcm_pcie_pm_ops,
	},
};
module_platform_driver(brcm_pcie_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");