drm/amdgpu: Separate vf2pf work item init from virt data exchange
[ Upstream commit 892deb48269c65376f3eeb5b4c032ff2c2979bd7 ]

We want to be able to call virt data exchange conditionally after gmc
sw init to reserve bad pages as early as possible. Since this is a
conditional call, we will need to call it again unconditionally later
in the init sequence.

Refactor the data exchange function so it can be called multiple times
without re-initializing the work item.

v2: Cleaned up the code. Kept the original call to init_exchange_data()
inside early init to initialize the work item, afterwards call
exchange_data() when needed.

Signed-off-by: Victor Skvortsov <victor.skvortsov@amd.com>
Reviewed By: Shaoyun.liu <Shaoyun.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by: Greg Kroah-Hartman
Parent: 87a4e51fb8
Commit: 9d18013dac
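
Before the diff, a minimal userspace C sketch of the init-once / run-many
pattern the commit message describes. This is an analogy rather than the
driver code; the names init_data_exchange(), exchange_data() and
vf2pf_work_armed are invented for illustration.

/*
 * Sketch: one-time initialization arms the work item exactly once,
 * while the exchange step only touches shared data and is therefore
 * safe to call any number of times.
 */
#include <stdbool.h>
#include <stdio.h>

static bool vf2pf_work_armed;   /* stands in for the vf2pf delayed work item */

/* Data exchange only: no work-item state, so repeated calls are harmless. */
static void exchange_data(void)
{
        printf("pf2vf/vf2pf data exchanged\n");
}

/* One-time init: arm the worker once, then do a first exchange. */
static void init_data_exchange(void)
{
        if (!vf2pf_work_armed) {
                vf2pf_work_armed = true;   /* INIT_DELAYED_WORK() in the driver */
                printf("vf2pf work item armed\n");
        }
        exchange_data();
}

int main(void)
{
        init_data_exchange();   /* early init: arm the work item once */
        exchange_data();        /* conditional call after gmc sw init */
        exchange_data();        /* unconditional call later in ip_init */
        return 0;
}

In the patch itself, amdgpu_virt_init_data_exchange() keeps the
INIT_DELAYED_WORK() setup, while the new amdgpu_virt_exchange_data()
carries only the pf2vf/vf2pf mapping work, so both the early conditional
call and the later unconditional call can use it safely.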
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2181,6 +2181,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			/* Try to reserve bad pages early */
+			if (amdgpu_sriov_vf(adev))
+				amdgpu_virt_exchange_data(adev);
+
 			r = amdgpu_device_vram_scratch_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -2212,7 +2216,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	}
 
 	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_init_data_exchange(adev);
+		amdgpu_virt_exchange_data(adev);
 
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -580,17 +580,35 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
 
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 {
-	uint64_t bp_block_offset = 0;
-	uint32_t bp_block_size = 0;
-	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
 	adev->virt.fw_reserve.p_pf2vf = NULL;
 	adev->virt.fw_reserve.p_vf2pf = NULL;
 	adev->virt.vf2pf_update_interval_ms = 0;
 
-	if (adev->mman.fw_vram_usage_va != NULL) {
+	if (adev->bios != NULL) {
 		adev->virt.vf2pf_update_interval_ms = 2000;
+
+		adev->virt.fw_reserve.p_pf2vf =
+			(struct amd_sriov_msg_pf2vf_info_header *)
+			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+		amdgpu_virt_read_pf2vf_data(adev);
+	}
+
+	if (adev->virt.vf2pf_update_interval_ms != 0) {
+		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+	}
+}
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+	uint64_t bp_block_offset = 0;
+	uint32_t bp_block_size = 0;
+	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+	if (adev->mman.fw_vram_usage_va != NULL) {
 
 		adev->virt.fw_reserve.p_pf2vf =
 			(struct amd_sriov_msg_pf2vf_info_header *)
 			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
@@ -621,16 +639,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
 
 		amdgpu_virt_read_pf2vf_data(adev);
-
-		return;
-	}
-
-	if (adev->virt.vf2pf_update_interval_ms != 0) {
-		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
-		schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
-	}
 	}
 }
 
 void amdgpu_detect_virtualization(struct amdgpu_device *adev)
 {
 	uint32_t reg;
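
One detail of the hunk above worth spelling out: AMD_SRIOV_MSG_PF2VF_OFFSET_KB
is expressed in kilobytes, so the code shifts it left by 10 to get a byte
offset before adding it to the mapping base (adev->bios or
adev->mman.fw_vram_usage_va). A minimal userspace sketch, assuming an
illustrative offset value and a simplified stand-in header struct (the real
layout lives in the driver's SR-IOV message header):

#include <stdint.h>
#include <stdio.h>

#define MSG_PF2VF_OFFSET_KB 64          /* assumed value, for illustration */

struct pf2vf_info_header {
        uint32_t size;                  /* size of the pf2vf block in bytes */
        uint32_t version;               /* layout version set by the host */
};

int main(void)
{
        static uint8_t region[128 << 10];   /* stand-in for fw_vram_usage_va */
        struct pf2vf_info_header *hdr =
                (struct pf2vf_info_header *)(region + (MSG_PF2VF_OFFSET_KB << 10));

        /* "<< 10" scales kilobytes to bytes (1 KB = 2^10 bytes) */
        printf("pf2vf header at byte offset %d\n", MSG_PF2VF_OFFSET_KB << 10);
        (void)hdr;   /* the driver would now validate the header contents */
        return 0;
}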
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -271,6 +271,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_detect_virtualization(struct amdgpu_device *adev);