drm/amdgpu: SI support for VCE clock control

Port functionality from the Radeon driver to support
VCE clock control.

Signed-off-by: Alex Jivin <alex.jivin@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
3 changed files with 167 additions and 1 deletion

@@ -1650,6 +1650,130 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 	return 0;
 }
 
+static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Make sure VCEPLL_CTLREQ is deasserted */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* Assert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* Wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+
+		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* Deassert UPLL_CTLREQ */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
+		DRM_ERROR("Timeout setting VCE clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
+	int r;
+
+	/* Bypass evclk and ecclk with bclk */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	/* Put PLL in bypass mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
+		     ~VCEPLL_BYPASS_EN_MASK);
+
+	if (!evclk || !ecclk) {
+		/* Keep the Bypass mode, put PLL to sleep */
+		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+			     ~VCEPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = si_calc_upll_dividers(adev, evclk, ecclk, 125000, 250000,
+				  16384, 0x03FFFFFF, 0, 128, 5,
+				  &fb_div, &evclk_div, &ecclk_div);
+	if (r)
+		return r;
+
+	/* Set RESET_ANTI_MUX to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* Set VCO_MODE to 1 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
+		     ~VCEPLL_VCO_MODE_MASK);
+
+	/* Toggle VCEPLL_SLEEP to 1 then back to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
+		     ~VCEPLL_SLEEP_MASK);
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
+
+	/* Deassert VCEPLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Assert VCEPLL_RESET again */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
+
+	/* Disable spread spectrum. */
+	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* Set feedback divider */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3,
+		     VCEPLL_FB_DIV(fb_div),
+		     ~VCEPLL_FB_DIV_MASK);
+
+	/* Set ref divider to 0 */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
+
+	/* Set PDIV_A and PDIV_B */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
+		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
+
+	/* Give the PLL some time to settle */
+	mdelay(15);
+
+	/* Deassert PLL_RESET */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* Switch from bypass mode to normal mode */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
+
+	r = si_vce_send_vcepll_ctlreq(adev);
+	if (r)
+		return r;
+
+	/* Switch EVCLK and ECCLK selection */
+	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
+		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
+		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1660,7 +1784,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.set_vga_state = &si_vga_set_state,
 	.get_xclk = &si_get_xclk,
 	.set_uvd_clocks = &si_set_uvd_clocks,
-	.set_vce_clocks = NULL,
+	.set_vce_clocks = &si_set_vce_clocks,
 	.get_pcie_lanes = &si_get_pcie_lanes,
 	.set_pcie_lanes = &si_set_pcie_lanes,
 	.get_config_memsize = &si_get_config_memsize,
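
For context, a minimal sketch (not part of this patch) of how the new hook is reached: amdgpu wraps the per-ASIC callbacks behind the amdgpu_asic_set_vce_clocks() macro in amdgpu.h, so shared code such as the VCE power-management path can request clock changes without knowing the SI PLL sequence. The caller name and error handling below are hypothetical and only illustrate the calling convention.

#include "amdgpu.h"

/*
 * Hypothetical caller, for illustration only: request new VCE engine
 * clock (evclk) and ECCLK frequencies in kHz through the per-ASIC hook
 * wired up by this patch. Passing 0 for either clock puts the VCE PLL
 * to sleep in bypass mode (see si_set_vce_clocks() above).
 */
static int example_request_vce_clocks(struct amdgpu_device *adev,
				      u32 evclk, u32 ecclk)
{
	int r;

	/* Dispatches to si_set_vce_clocks() on SI parts */
	r = amdgpu_asic_set_vce_clocks(adev, evclk, ecclk);
	if (r)
		DRM_ERROR("Failed to set VCE clocks: %d\n", r);

	return r;
}

Keeping the PLL programming behind the asic_funcs table is what lets the ASIC-agnostic VCE/DPM code pick up this capability on SI without further changes.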