drm/amdgpu/sdma4: APUs do not have a page queue

Don't use the paging queue on APUs.

Tested-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -51,6 +51,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
 	uint32_t		srbm_soft_reset;
+	bool			has_page_queue;
 };

 /*
@@ -746,6 +746,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	if (enable == false) {
 		sdma_v4_0_gfx_stop(adev);
 		sdma_v4_0_rlc_stop(adev);
+		if (adev->sdma.has_page_queue)
 			sdma_v4_0_page_stop(adev);
 	}

@@ -1115,6 +1116,7 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)

 		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 		sdma_v4_0_gfx_resume(adev, i);
+		if (adev->sdma.has_page_queue)
 			sdma_v4_0_page_resume(adev, i);

 		/* set utc l1 enable flag always to 1 */
@@ -1457,10 +1459,13 @@ static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (adev->asic_type == CHIP_RAVEN)
+	if (adev->asic_type == CHIP_RAVEN) {
 		adev->sdma.num_instances = 1;
-	else
+		adev->sdma.has_page_queue = false;
+	} else {
 		adev->sdma.num_instances = 2;
+		adev->sdma.has_page_queue = true;
+	}

 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
@@ -1522,6 +1527,7 @@ static int sdma_v4_0_sw_init(void *handle)
 		if (r)
 			return r;

+		if (adev->sdma.has_page_queue) {
 			ring = &adev->sdma.instance[i].page;
 			ring->ring_obj = NULL;
 			ring->use_doorbell = false;
@@ -1535,6 +1541,7 @@ static int sdma_v4_0_sw_init(void *handle)
 			if (r)
 				return r;
 		}
+	}

 	return r;
 }
@@ -1546,6 +1553,7 @@ static int sdma_v4_0_sw_fini(void *handle)

 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+		if (adev->sdma.has_page_queue)
 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
 	}

@@ -1955,9 +1963,11 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
+		if (adev->sdma.has_page_queue) {
 			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
 			adev->sdma.instance[i].page.me = i;
 		}
+	}
 }

 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
@@ -2056,7 +2066,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)

 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
 			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
 		adev->vm_manager.vm_pte_rqs[i] =
 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
|
Reference in New Issue
Block a user