vlv_suspend.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492
  1. // SPDX-License-Identifier: MIT
  2. /*
  3. * Copyright © 2020 Intel Corporation
  4. */
  5. #include <linux/string_helpers.h>
  6. #include <linux/kernel.h>
  7. #include <drm/drm_print.h>
  8. #include "i915_drv.h"
  9. #include "i915_reg.h"
  10. #include "i915_trace.h"
  11. #include "i915_utils.h"
  12. #include "intel_pm.h"
  13. #include "vlv_suspend.h"
  14. #include "gt/intel_gt_regs.h"
/*
 * Snapshot of the Gunit registers that the driver must save before a D3 ->
 * S0i[R123] transition and restore afterwards. Fields are grouped by the HW
 * unit / power domain they belong to, matching the save/restore order in
 * vlv_save_gunit_s0ix_state() / vlv_restore_gunit_s0ix_state().
 */
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
/*
 * Read every Gunit register listed in struct vlv_s0ix_state into @i915's
 * saved copy. No-op when the state buffer was never allocated (non-VLV,
 * see vlv_suspend_init()).
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
{
	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
	struct intel_uncore *uncore = &i915->uncore;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
	s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
	s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));

	s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
	s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
	s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
	s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
	s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
	s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
	s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
	s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
	s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
	s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
	s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
	s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
	s->ecobus = intel_uncore_read(uncore, ECOBUS);
	s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
	s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
	s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
	s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
	s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = intel_uncore_read(uncore, GTIMR);
	s->gt_ier = intel_uncore_read(uncore, GTIER);
	s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
	s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = intel_uncore_read(uncore, TILECTL);
	s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
	s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
	s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
	s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
	s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,  0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC,  0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}
/*
 * Write back the Gunit register snapshot captured by
 * vlv_save_gunit_s0ix_state(). Mirrors the save function register-for-
 * register; no-op when the state buffer was never allocated.
 */
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
{
	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
	intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	/* upper 16 bits of ARB_MODE are write-enable mask bits */
	intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
	intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
	intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);

	intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
	intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
	intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
	intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
	intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
	intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
	intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
	intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
	intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
	intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
	intel_uncore_write(uncore, ECOBUS, s->ecobus);
	intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
	intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
	intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	intel_uncore_write(uncore, GTIMR, s->gt_imr);
	intel_uncore_write(uncore, GTIER, s->gt_ier);
	intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
	intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	intel_uncore_write(uncore, TILECTL, s->tilectl);
	intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);

	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);

	intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
	intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
	intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
	intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
/*
 * Poll VLV_GTLC_PW_STATUS until (status & @mask) == @val.
 *
 * Returns 0 on success, -ETIMEDOUT (from wait_for()) on timeout.
 */
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}
  261. static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
  262. {
  263. struct intel_uncore *uncore = &i915->uncore;
  264. u32 val;
  265. int err;
  266. val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
  267. val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
  268. if (force_on)
  269. val |= VLV_GFX_CLK_FORCE_ON_BIT;
  270. intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
  271. if (!force_on)
  272. return 0;
  273. err = intel_wait_for_register(uncore,
  274. VLV_GTLC_SURVIVABILITY_REG,
  275. VLV_GFX_CLK_STATUS_BIT,
  276. VLV_GFX_CLK_STATUS_BIT,
  277. 20);
  278. if (err)
  279. drm_err(&i915->drm,
  280. "timeout waiting for GFX clock force-on (%08x)\n",
  281. intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
  282. return err;
  283. }
  284. static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
  285. {
  286. struct intel_uncore *uncore = &i915->uncore;
  287. u32 mask;
  288. u32 val;
  289. int err;
  290. val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
  291. val &= ~VLV_GTLC_ALLOWWAKEREQ;
  292. if (allow)
  293. val |= VLV_GTLC_ALLOWWAKEREQ;
  294. intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
  295. intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
  296. mask = VLV_GTLC_ALLOWWAKEACK;
  297. val = allow ? mask : 0;
  298. err = vlv_wait_for_pw_status(i915, mask, val);
  299. if (err)
  300. drm_err(&i915->drm, "timeout disabling GT waking\n");
  301. return err;
  302. }
  303. static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
  304. bool wait_for_on)
  305. {
  306. u32 mask;
  307. u32 val;
  308. mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
  309. val = wait_for_on ? mask : 0;
  310. /*
  311. * RC6 transitioning can be delayed up to 2 msec (see
  312. * valleyview_enable_rps), use 3 msec for safety.
  313. *
  314. * This can fail to turn off the rc6 if the GPU is stuck after a failed
  315. * reset and we are trying to force the machine to sleep.
  316. */
  317. if (vlv_wait_for_pw_status(dev_priv, mask, val))
  318. drm_dbg(&dev_priv->drm,
  319. "timeout waiting for GT wells to go %s\n",
  320. str_on_off(wait_for_on));
  321. }
  322. static void vlv_check_no_gt_access(struct drm_i915_private *i915)
  323. {
  324. struct intel_uncore *uncore = &i915->uncore;
  325. if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
  326. return;
  327. drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
  328. intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
  329. }
/*
 * Final VLV/CHV-specific step of the suspend sequence: park the GT wells,
 * force the GFX clock on, disable GT wake requests, save the Gunit state,
 * then release the clock force. Uses goto-unwind so that on any failure
 * waking is re-enabled and the clock force released before returning.
 *
 * Returns 0 on success or the first error encountered; no-op (returns 0)
 * on non-VLV/CHV platforms.
 */
int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return 0;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	drm_WARN_ON(&dev_priv->drm,
		    (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
/*
 * VLV/CHV-specific resume step: force the GFX clock on, restore the saved
 * Gunit state, re-enable GT waking and release the clock force, then (on a
 * runtime-PM resume) re-apply clock gating.
 *
 * Deliberately continues past individual failures — each step is attempted
 * regardless — and returns the FIRST error code seen, which the caller uses
 * to permanently disable RPM. No-op (returns 0) on non-VLV/CHV platforms.
 */
int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
{
	int err;
	int ret;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return 0;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}
  387. int vlv_suspend_init(struct drm_i915_private *i915)
  388. {
  389. if (!IS_VALLEYVIEW(i915))
  390. return 0;
  391. /* we write all the values in the struct, so no need to zero it out */
  392. i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
  393. GFP_KERNEL);
  394. if (!i915->vlv_s0ix_state)
  395. return -ENOMEM;
  396. return 0;
  397. }
  398. void vlv_suspend_cleanup(struct drm_i915_private *i915)
  399. {
  400. if (!i915->vlv_s0ix_state)
  401. return;
  402. kfree(i915->vlv_s0ix_state);
  403. i915->vlv_s0ix_state = NULL;
  404. }