pm-arm.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * ARM-specific support for Broadcom STB S2/S3/S5 power management
  4. *
  5. * S2: clock gate CPUs and as many peripherals as possible
* S3: power off all of the chip except the Always ON (AON) island; keep DDR in
* self-refresh
  8. * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
  9. * treat this mode like a soft power-off, with wakeup allowed from AON
  10. *
  11. * Copyright © 2014-2017 Broadcom
  12. */
  13. #define pr_fmt(fmt) "brcmstb-pm: " fmt
  14. #include <linux/bitops.h>
  15. #include <linux/compiler.h>
  16. #include <linux/delay.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/err.h>
  19. #include <linux/init.h>
  20. #include <linux/io.h>
  21. #include <linux/ioport.h>
  22. #include <linux/kconfig.h>
  23. #include <linux/kernel.h>
  24. #include <linux/memblock.h>
  25. #include <linux/module.h>
  26. #include <linux/of.h>
  27. #include <linux/of_address.h>
  28. #include <linux/panic_notifier.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/pm.h>
  31. #include <linux/printk.h>
  32. #include <linux/proc_fs.h>
  33. #include <linux/sizes.h>
  34. #include <linux/slab.h>
  35. #include <linux/sort.h>
  36. #include <linux/suspend.h>
  37. #include <linux/types.h>
  38. #include <linux/uaccess.h>
  39. #include <linux/soc/brcmstb/brcmstb.h>
  40. #include <asm/fncpy.h>
  41. #include <asm/setup.h>
  42. #include <asm/suspend.h>
  43. #include "pm.h"
  44. #include "aon_defs.h"
/* Pad-control register offset within each MEMC's DDR SHIM-PHY block */
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0: legacy S3 entry — gate/sequence the PLL via pad-control bits */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1: S3_PWRDWN_SEQ field selects the PHY power-down sequence */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

/* Bits in the DDR PHY STANDBY_CONTROL registers (method #1, S5 entry) */
#define DDR_FORCE_CKE_RST_N		BIT(3)
#define DDR_PHY_RST_N			BIT(2)
#define DDR_PHY_CKE			BIT(1)

/* Sentinel offset meaning "this PHY has no channel B" */
#define DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

/* Register mappings for one memory controller */
struct brcmstb_memc {
	void __iomem *ddr_phy_base;	/* DDR PHY registers */
	void __iomem *ddr_shimphy_base;	/* DDR SHIM-PHY registers */
	void __iomem *ddr_ctrl;		/* DDR sequencer/control registers */
};

/* Driver-wide state, filled in once at probe time (see brcmstb_pm_probe()) */
struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;	/* Always-ON control registers */
	void __iomem *aon_sram;		/* AON data RAM (persists across S3) */
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;	/* executable on-chip SRAM mapping */
	size_t boot_sram_len;

	bool support_warm_boot;		/* PHY supports S3 warm boot */
	size_t pll_status_offset;	/* PLL status reg offset in PHY block */
	int num_memc;			/* number of mapped SHIM-PHY MEMCs */

	struct brcmstb_s3_params *s3_params;	/* handed to bootloader on S3 */
	dma_addr_t s3_params_pa;		/* its bus address */
	int s3entry_method;		/* 0 or 1; selects S3 entry sequence */
	u32 warm_boot_offset;		/* WARM_BOOT reg offset in ddr_ctrl */
	u32 phy_a_standby_ctrl_offs;	/* channel A STANDBY_CONTROL offset */
	u32 phy_b_standby_ctrl_offs;	/* channel B offset or NO_CHANNEL */
	bool needs_ddr_pad;		/* whether shimphy_set() must touch pads */
	struct platform_device *pdev;
};

/* Commands accepted by the Boot Support Processor via PM_INITIATE */
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01	/* "go" bit; BSP clears it when done */
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

noinline int brcmstb_pm_s3_finish(void);

/* Points at the SRAM-resident copy of brcmstb_pm_do_s2 (see brcmstb_pm_s2()) */
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);
/*
 * Remap the on-chip boot SRAM as uncached + executable so that the low-level
 * suspend code can be copied into and run from it (DDR is unavailable during
 * parts of S2/S3 entry).
 *
 * Returns 0 on success, a negative errno if the node's "reg" cannot be
 * resolved or the region cannot be mapped.
 */
static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}
/* Match table used to locate the boot SRAM node at probe time */
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};
/*
 * Issue a command to the Boot Support Processor (BSP) through the AON
 * PM_INITIATE register, then poll (up to ~1 second) for the BSP to clear
 * the PM_INITIATE bit as its acknowledgement.
 *
 * Returns 0 when the BSP reports PM_INITIATE_SUCCESS in the low byte,
 * non-zero otherwise (including on timeout, when the stale register value
 * is what gets evaluated).
 */
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	/* Clear any previous command; read back to post the write */
	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	/* Poll in 50us steps until the BSP clears PM_INITIATE or we time out */
	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}
/*
 * Perform the v1 BSP power handshake: clear bit 0 of HOST_MISC_CMDS, then
 * ask the BSP to stop clocks. Must be called before any suspend/poweroff
 * transition.
 *
 * Returns the (non-zero) result of do_bsp_initiate_command() on failure,
 * 0 on success.
 */
static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);
	return ret;
}
  168. static inline void shimphy_set(u32 value, u32 mask)
  169. {
  170. int i;
  171. if (!ctrl.needs_ddr_pad)
  172. return;
  173. for (i = 0; i < ctrl.num_memc; i++) {
  174. u32 tmp;
  175. tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
  176. SHIMPHY_DDR_PAD_CNTRL);
  177. tmp = value | (tmp & mask);
  178. writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
  179. SHIMPHY_DDR_PAD_CNTRL);
  180. }
  181. wmb(); /* Complete sequence in order. */
  182. }
  183. static inline void ddr_ctrl_set(bool warmboot)
  184. {
  185. int i;
  186. for (i = 0; i < ctrl.num_memc; i++) {
  187. u32 tmp;
  188. tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
  189. ctrl.warm_boot_offset);
  190. if (warmboot)
  191. tmp |= 1;
  192. else
  193. tmp &= ~1; /* Cold boot */
  194. writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
  195. ctrl.warm_boot_offset);
  196. }
  197. /* Complete sequence in order */
  198. wmb();
  199. }
/* S3 entry, method #0: gate the PLL and enable the pad PLL sequence */
static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}
/* S3 entry, method #1: program the PHY power-down sequence, set WARM_BOOT */
static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
	ddr_ctrl_set(true);
}
  218. static inline void s5entry_method1(void)
  219. {
  220. int i;
  221. /*
  222. * S5 Entry Sequence
  223. * -----------------
  224. * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
  225. * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
  226. * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
  227. * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
  228. */
  229. shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
  230. SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
  231. ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
  232. ddr_ctrl_set(false);
  233. for (i = 0; i < ctrl.num_memc; i++) {
  234. u32 tmp;
  235. /* Step 3: Channel A (RST_N = CKE = 0) */
  236. tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
  237. ctrl.phy_a_standby_ctrl_offs);
  238. tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
  239. writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
  240. ctrl.phy_a_standby_ctrl_offs);
  241. /* Step 3: Channel B? */
  242. if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
  243. tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
  244. ctrl.phy_b_standby_ctrl_offs);
  245. tmp &= ~(DDR_PHY_RST_N | DDR_PHY_RST_N);
  246. writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
  247. ctrl.phy_b_standby_ctrl_offs);
  248. }
  249. }
  250. /* Must complete */
  251. wmb();
  252. }
/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 *
 * @base_cmd: PM_CTRL configuration word (e.g. PM_COLD_CONFIG)
 * @onewrite: true for s3entry method 1, where a single PM_CTRL write starts
 *	      the transition; false for the legacy two-write 0->1 sequence.
 *
 * Does not return on success — execution stops in wfi() until wakeup/reset.
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	/* S5 with method 1 needs the extra DDR PHY shutdown steps first */
	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}
/*
 * Support S5 cold boot out of "poweroff": installed as pm_power_off.
 * Handshakes with the BSP, clears the S3 warm-boot magic so the next boot
 * is cold, then commands the PMSM into the cold-power-down state.
 * Never returns on success.
 */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}
  291. static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
  292. {
  293. unsigned int size = ALIGN(len, FNCPY_ALIGN);
  294. if (ctrl.boot_sram_len < size) {
  295. pr_err("standby code will not fit in SRAM\n");
  296. return NULL;
  297. }
  298. return fncpy(ctrl.boot_sram, fn, size);
  299. }
/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 *
 * Returns the result of the SRAM-resident s2 routine, or -EINVAL if the
 * routine could not be copied into SRAM.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	/* Stage the low-level s2 code into SRAM and run it from there */
	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
				     ctrl.memcs[0].ddr_phy_base +
				     ctrl.pll_status_offset);
}
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static because
 * it is referenced from brcmstb_pm_s3()
 *
 * Prepares the S3 parameter block and AON magic/control words for the
 * bootloader, asks the BSP to generate the memory key, then commands the
 * PMSM into warm power-down. Returns -EIO on key failure, -EINVAL for an
 * unknown s3entry_method, and -EINTR if the wfi() was interrupted (i.e. the
 * suspend did not complete).
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is a the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/*
	 * Load random / fixed key
	 * NOTE(review): S3_FLAG_LOAD_RANDKEY is set unconditionally above, so
	 * the fixed-key branch below is currently unreachable — confirm intent.
	 */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}
/*
 * cpu_suspend() callback: switch the stack pointer to @sp (the top of boot
 * SRAM), call brcmstb_pm_s3_finish() on that new stack, then restore the
 * original stack pointer and hand its return value (taken from r0) back.
 * The stack switch is done in asm so no DRAM-resident stack is touched
 * while DDR is being shut down.
 */
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}
/*
 * Enter S3 via cpu_suspend(), using the top of the boot SRAM as the
 * temporary stack for the final shutdown steps.
 */
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}
  398. static int brcmstb_pm_standby(bool deep_standby)
  399. {
  400. int ret;
  401. if (brcmstb_pm_handshake())
  402. return -EIO;
  403. if (deep_standby)
  404. ret = brcmstb_pm_s3();
  405. else
  406. ret = brcmstb_pm_s2();
  407. if (ret)
  408. pr_err("%s: standby failed\n", __func__);
  409. return ret;
  410. }
  411. static int brcmstb_pm_enter(suspend_state_t state)
  412. {
  413. int ret = -EINVAL;
  414. switch (state) {
  415. case PM_SUSPEND_STANDBY:
  416. ret = brcmstb_pm_standby(false);
  417. break;
  418. case PM_SUSPEND_MEM:
  419. ret = brcmstb_pm_standby(true);
  420. break;
  421. }
  422. return ret;
  423. }
  424. static int brcmstb_pm_valid(suspend_state_t state)
  425. {
  426. switch (state) {
  427. case PM_SUSPEND_STANDBY:
  428. return true;
  429. case PM_SUSPEND_MEM:
  430. return ctrl.support_warm_boot;
  431. default:
  432. return false;
  433. }
  434. }
/* Suspend callbacks registered via suspend_set_ops() at probe time */
static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

/* AON control block match table; doubles as the driver's of_match_table */
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};
/* Per-DDR-PHY-version parameters, selected via the ddr_phy_dt_ids table */
struct ddr_phy_ofdata {
	bool supports_warm_boot;	/* S3 (warm boot) usable on this PHY */
	size_t pll_status_offset;	/* PLL status register offset */
	int s3entry_method;		/* 0 = pad gating, 1 = PWRDWN_SEQ */
	u32 warm_boot_offset;		/* WARM_BOOT register (method 1) */
	u32 phy_a_standby_ctrl_offs;	/* channel A STANDBY_CONTROL */
	u32 phy_b_standby_ctrl_offs;	/* channel B, or DDR_PHY_NO_CHANNEL */
};

/* v71.1: method 1, single-channel PHY */
static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

/* v72.0: method 1, dual-channel PHY */
static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

/* v225.1: legacy method 0, no warm boot */
static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

/* v240.1 (and v240.2): legacy method 0 with warm boot */
static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};
/* Per-DDR-sequencer parameters, selected via brcmstb_memc_of_match */
struct ddr_seq_ofdata {
	bool needs_ddr_pad;	/* whether shimphy_set() must program pads */
	u32 warm_boot_offset;	/* overrides the PHY-derived offset if set */
};

/* rev b.2.2 and later: sequencer handles pads; fixed WARM_BOOT offset */
static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

/* default/older sequencers: driver must program the SHIM-PHY pads */
static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};
/*
 * Find the first DT node matching @matches and map its @index'th "reg"
 * region (also requesting the memory region). On a match, *@ofdata (if
 * non-NULL) receives the matched entry's .data.
 *
 * Returns the mapping, or ERR_PTR(-EINVAL) when no node matches.
 *
 * NOTE(review): the node returned by of_find_matching_node_and_match()
 * carries a reference that is never of_node_put() here; dn->full_name is
 * also handed to of_io_request_and_map() as the resource name, so dropping
 * the reference early would risk a dangling name. Flagging as a potential
 * refcount leak — confirm against current upstream handling.
 */
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}
  555. /*
  556. * The AON is a small domain in the SoC that can retain its state across
  557. * various system wide sleep states and specific reset conditions; the
  558. * AON DATA RAM is a small RAM of a few words (< 1KB) which can store
  559. * persistent information across such events.
  560. *
  561. * The purpose of the below panic notifier is to help with notifying
  562. * the bootloader that a panic occurred and so that it should try its
  563. * best to preserve the DRAM contents holding that buffer for recovery
  564. * by the kernel as opposed to wiping out DRAM clean again.
  565. *
  566. * Reference: comment from Florian Fainelli, at
  567. * https://lore.kernel.org/lkml/[email protected]
  568. */
/*
 * Panic notifier: stamp the AON data RAM with BRCMSTB_PANIC_MAGIC so the
 * bootloader preserves DRAM across the reboot (see the rationale above).
 */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};
  578. static int brcmstb_pm_probe(struct platform_device *pdev)
  579. {
  580. const struct ddr_phy_ofdata *ddr_phy_data;
  581. const struct ddr_seq_ofdata *ddr_seq_data;
  582. const struct of_device_id *of_id = NULL;
  583. struct device_node *dn;
  584. void __iomem *base;
  585. int ret, i, s;
  586. /* AON ctrl registers */
  587. base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
  588. if (IS_ERR(base)) {
  589. pr_err("error mapping AON_CTRL\n");
  590. ret = PTR_ERR(base);
  591. goto aon_err;
  592. }
  593. ctrl.aon_ctrl_base = base;
  594. /* AON SRAM registers */
  595. base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
  596. if (IS_ERR(base)) {
  597. /* Assume standard offset */
  598. ctrl.aon_sram = ctrl.aon_ctrl_base +
  599. AON_CTRL_SYSTEM_DATA_RAM_OFS;
  600. s = 0;
  601. } else {
  602. ctrl.aon_sram = base;
  603. s = 1;
  604. }
  605. writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
  606. /* DDR PHY registers */
  607. base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
  608. (const void **)&ddr_phy_data);
  609. if (IS_ERR(base)) {
  610. pr_err("error mapping DDR PHY\n");
  611. ret = PTR_ERR(base);
  612. goto ddr_phy_err;
  613. }
  614. ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
  615. ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
  616. /* Only need DDR PHY 0 for now? */
  617. ctrl.memcs[0].ddr_phy_base = base;
  618. ctrl.s3entry_method = ddr_phy_data->s3entry_method;
  619. ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
  620. ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
  621. /*
  622. * Slightly gross to use the phy ver to get a memc,
  623. * offset but that is the only versioned things so far
  624. * we can test for.
  625. */
  626. ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
  627. /* DDR SHIM-PHY registers */
  628. for_each_matching_node(dn, ddr_shimphy_dt_ids) {
  629. i = ctrl.num_memc;
  630. if (i >= MAX_NUM_MEMC) {
  631. of_node_put(dn);
  632. pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
  633. break;
  634. }
  635. base = of_io_request_and_map(dn, 0, dn->full_name);
  636. if (IS_ERR(base)) {
  637. of_node_put(dn);
  638. if (!ctrl.support_warm_boot)
  639. break;
  640. pr_err("error mapping DDR SHIMPHY %d\n", i);
  641. ret = PTR_ERR(base);
  642. goto ddr_shimphy_err;
  643. }
  644. ctrl.memcs[i].ddr_shimphy_base = base;
  645. ctrl.num_memc++;
  646. }
  647. /* Sequencer DRAM Param and Control Registers */
  648. i = 0;
  649. for_each_matching_node(dn, brcmstb_memc_of_match) {
  650. base = of_iomap(dn, 0);
  651. if (!base) {
  652. of_node_put(dn);
  653. pr_err("error mapping DDR Sequencer %d\n", i);
  654. ret = -ENOMEM;
  655. goto brcmstb_memc_err;
  656. }
  657. of_id = of_match_node(brcmstb_memc_of_match, dn);
  658. if (!of_id) {
  659. iounmap(base);
  660. of_node_put(dn);
  661. ret = -EINVAL;
  662. goto brcmstb_memc_err;
  663. }
  664. ddr_seq_data = of_id->data;
  665. ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
  666. /* Adjust warm boot offset based on the DDR sequencer */
  667. if (ddr_seq_data->warm_boot_offset)
  668. ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
  669. ctrl.memcs[i].ddr_ctrl = base;
  670. i++;
  671. }
  672. pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
  673. ctrl.support_warm_boot, ctrl.s3entry_method,
  674. ctrl.warm_boot_offset);
  675. dn = of_find_matching_node(NULL, sram_dt_ids);
  676. if (!dn) {
  677. pr_err("SRAM not found\n");
  678. ret = -EINVAL;
  679. goto brcmstb_memc_err;
  680. }
  681. ret = brcmstb_init_sram(dn);
  682. of_node_put(dn);
  683. if (ret) {
  684. pr_err("error setting up SRAM for PM\n");
  685. goto brcmstb_memc_err;
  686. }
  687. ctrl.pdev = pdev;
  688. ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
  689. if (!ctrl.s3_params) {
  690. ret = -ENOMEM;
  691. goto s3_params_err;
  692. }
  693. ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
  694. sizeof(*ctrl.s3_params),
  695. DMA_TO_DEVICE);
  696. if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
  697. pr_err("error mapping DMA memory\n");
  698. ret = -ENOMEM;
  699. goto out;
  700. }
  701. atomic_notifier_chain_register(&panic_notifier_list,
  702. &brcmstb_pm_panic_nb);
  703. pm_power_off = brcmstb_pm_poweroff;
  704. suspend_set_ops(&brcmstb_pm_ops);
  705. return 0;
  706. out:
  707. kfree(ctrl.s3_params);
  708. s3_params_err:
  709. iounmap(ctrl.boot_sram);
  710. brcmstb_memc_err:
  711. for (i--; i >= 0; i--)
  712. iounmap(ctrl.memcs[i].ddr_ctrl);
  713. ddr_shimphy_err:
  714. for (i = 0; i < ctrl.num_memc; i++)
  715. iounmap(ctrl.memcs[i].ddr_shimphy_base);
  716. iounmap(ctrl.memcs[0].ddr_phy_base);
  717. ddr_phy_err:
  718. iounmap(ctrl.aon_ctrl_base);
  719. if (s)
  720. iounmap(ctrl.aon_sram);
  721. aon_err:
  722. pr_warn("PM: initialization failed with code %d\n", ret);
  723. return ret;
  724. }
/*
 * Probe-only driver (registered via platform_driver_probe()); no .remove,
 * since it installs system-lifetime hooks (pm_power_off, suspend ops,
 * panic notifier).
 */
static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);