Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed drivers. Otherwise we'll have chaos even before 4.12 started in earnest.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>

@@ -1037,7 +1037,6 @@ struct amdgpu_uvd {
bool use_ctx_buf;
struct amd_sched_entity entity;
uint32_t srbm_soft_reset;
bool is_powergated;
};

/*
@@ -1066,7 +1065,6 @@ struct amdgpu_vce {
struct amd_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
bool is_powergated;
};

/*
@@ -1484,6 +1482,9 @@ struct amdgpu_device {
spinlock_t gtt_list_lock;
struct list_head gtt_list;

/* record hw reset is performed */
bool has_hw_reset;

};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1702,13 +1703,14 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
int amdgpu_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
struct amdgpu_ring **out_ring);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);

@@ -100,7 +100,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */

if (!(adev->flags & AMD_IS_APU))
if (!amdgpu_card_posted(adev))
if (amdgpu_need_post(adev))
return false;

adev->bios = NULL;

@@ -834,32 +834,57 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
else
} else
strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
else
} else
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
(adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
(adev->pdev->revision == 0xef) ||
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU)
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
else if (type == CGS_UCODE_ID_SMU_SK)
if (type == CGS_UCODE_ID_SMU) {
if ((adev->pdev->device == 0x67df) &&
((adev->pdev->revision == 0xe0) ||
(adev->pdev->revision == 0xe3) ||
(adev->pdev->revision == 0xe4) ||
(adev->pdev->revision == 0xe5) ||
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");

@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}

if (!(*out_ring && (*out_ring)->adev)) {
DRM_ERROR("Ring %d is not initialized on IP %d\n",
ring, ip_type);
return -EINVAL;
}

return 0;
}

@@ -344,8 +351,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
* submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
u64 num_bytes)
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
{
spin_lock(&adev->mm_stats.lock);
adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);

@@ -619,25 +619,29 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
* GPU helpers function.
*/
/**
* amdgpu_card_posted - check if the hw has already been initialized
* amdgpu_need_post - check if the hw need post or not
*
* @adev: amdgpu_device pointer
*
* Check if the asic has been initialized (all asics).
* Used at driver startup.
* Returns true if initialized or false if not.
* Check if the asic has been initialized (all asics) at driver startup
* or post is needed if hw reset is performed.
* Returns true if need or false if not.
*/
bool amdgpu_card_posted(struct amdgpu_device *adev)
bool amdgpu_need_post(struct amdgpu_device *adev)
{
uint32_t reg;

if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* then check MEM_SIZE, in case the crtcs are off */
reg = RREG32(mmCONFIG_MEMSIZE);

if (reg)
return true;
return false;

return false;
return true;

}

@@ -665,7 +669,7 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
return !amdgpu_card_posted(adev);
return amdgpu_need_post(adev);
}

/**
@@ -2071,7 +2075,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
amdgpu_atombios_scratch_regs_restore(adev);

/* post card */
if (!amdgpu_card_posted(adev) || !resume) {
if (amdgpu_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");

@@ -487,67 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
*
* @adev: amdgpu_device pointer
* @bo_va: bo_va to update
* @list: validation list
* @operation: map or unmap
*
* Update the bo_va directly after setting it's address. Errors are not
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
struct list_head *list,
uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry vm_pd;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
int r;
struct ttm_validate_buffer *entry;
int r = -ERESTARTSYS;

INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);

tv.bo = &bo_va->bo->tbo;
tv.shared = true;
list_add(&tv.head, &list);

amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

/* Provide duplicates to avoid -EALREADY */
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_print;

list_for_each_entry(entry, &list, head) {
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);

/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
goto error_unreserve;

if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
goto error_unreserve;
if (amdgpu_gem_va_check(NULL, bo))
goto error;
}

r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
NULL);
if (r)
goto error_unreserve;
goto error;

r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
if (r)
goto error_unreserve;
goto error;

r = amdgpu_vm_clear_freed(adev, bo_va->vm);
if (r)
goto error_unreserve;
goto error;

if (operation == AMDGPU_VA_OP_MAP)
r = amdgpu_vm_bo_update(adev, bo_va, false);

error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);

error_print:
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
@@ -564,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
struct list_head list;
uint32_t invalid_flags, va_flags = 0;
int r = 0;

@@ -602,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &abo->tbo;
tv.shared = true;
tv.shared = false;
list_add(&tv.head, &list);

amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -640,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
ttm_eu_backoff_reservation(&ticket, &list);
if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
!amdgpu_vm_debug)
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
ttm_eu_backoff_reservation(&ticket, &list);

drm_gem_object_unreference_unlocked(gobj);
return r;

@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
u64 initial_bytes_moved;
size_t acc_size;
int r;

@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
#endif

if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
WARN_ON(!locked);
}

initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
&amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
amdgpu_cs_report_moved_bytes(adev,
atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);

if (unlikely(r != 0)) {
if (!resv)
ww_mutex_unlock(&bo->tbo.resv->lock);
return r;
}

bo->tbo.priority = ilog2(bo->tbo.num_pages);
if (kernel)

@@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
amdgpu_pm_compute_clocks(adev);
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
} else {
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
amdgpu_pm_compute_clocks(adev);
}
amdgpu_pm_compute_clocks(adev);

}
}

@@ -1286,7 +1296,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
if (!adev->pm.dpm_enabled)
return;

amdgpu_display_bandwidth_update(adev);
if (adev->mode_info.num_crtc)
amdgpu_display_bandwidth_update(adev);

for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];

@@ -529,6 +529,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
if (mem->start == AMDGPU_BO_INVALID_OFFSET)
return -EINVAL;

mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)

@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
}
}
}

@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);

}
}
mutex_unlock(&adev->vce.idle_mutex);

@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
amdgpu_vm_bo_rmv(adev, bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
kfree(bo_va);
return r;
}

@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)

static int ci_upload_firmware(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
int i, ret;

if (amdgpu_ci_is_smc_running(adev)) {
@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
amdgpu_ci_stop_smc_clock(adev);
amdgpu_ci_reset_smc(adev);

ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);

return ret;

@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,

if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
if (amdgpu_new_state->evclk) {
/* turn the clocks on when encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
if (ret)
return ret;

pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
tmp = RREG32_SMC(ixDPM_TABLE_475);
tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
ret = ci_enable_vce_dpm(adev, false);
if (ret)
return ret;
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}
}
return ret;
@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
adev->pm.current_mclk = adev->clock.default_mclk;
adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

if (amdgpu_dpm == 0)
return 0;

ret = ci_dpm_init_microcode(adev);
if (ret)
return ret;

if (amdgpu_dpm == 0)
return 0;

INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
mutex_lock(&adev->pm.mutex);
ret = ci_dpm_init(adev);
@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)

struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!amdgpu_dpm)
if (!amdgpu_dpm) {
ret = ci_upload_firmware(adev);
if (ret) {
DRM_ERROR("ci_upload_firmware failed\n");
return ret;
}
ci_dpm_start_smc(adev);
return 0;
}

mutex_lock(&adev->pm.mutex);
ci_dpm_setup_asic(adev);
@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
mutex_lock(&adev->pm.mutex);
ci_dpm_disable(adev);
mutex_unlock(&adev->pm.mutex);
} else {
ci_dpm_stop_smc(adev);
}

return 0;

@@ -1176,6 +1176,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
/* enable BM */
pci_set_master(adev->pdev);
adev->has_hw_reset = true;
r = 0;
break;
}
@@ -1722,8 +1723,8 @@ static int cik_common_early_init(void *handle)
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |*/
AMD_PG_SUPPORT_UVD |
/*AMD_PG_SUPPORT_VCE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_VCE |
/* AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_ACP |

@@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
default:
encoder->possible_crtcs = 0x3;
break;
case 3:
encoder->possible_crtcs = 0x7;
break;
case 4:
encoder->possible_crtcs = 0xf;
break;
case 5:
encoder->possible_crtcs = 0x1f;
break;
case 6:
encoder->possible_crtcs = 0x3f;
break;

@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =

static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(amdgpu_encoder);
kfree(encoder);
}

static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {

@@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width)
return (u32)(((u64)1 << bit_width) - 1);
}

static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
u32 max_rb_num_per_se,
u32 sh_per_se)
static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;

data = RREG32(mmCC_RB_BACKEND_DISABLE);
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
data = RREG32(mmCC_RB_BACKEND_DISABLE) |
RREG32(mmGC_USER_RB_BACKEND_DISABLE);

data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);

mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/
adev->gfx.config.max_sh_per_se);

return data & mask;
return ~data & mask;
}

static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
@@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}

static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num_per_se)
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data, mask;
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
u32 data;
u32 raster_config = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
unsigned num_rb_pipes;

mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * 2);
data = gfx_v6_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);

mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}

adev->gfx.config.backend_enable_mask = enabled_rbs;
adev->gfx.config.num_rbs = hweight32(enabled_rbs);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);

num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);

mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
data = 0;
for (j = 0; j < sh_per_se; j++) {
switch (enabled_rbs & 3) {
case 1:
data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
break;
case 2:
data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
break;
case 3:
default:
data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
break;
}
enabled_rbs >>= 2;
}
gfx_v6_0_raster_config(adev, &data);
gfx_v6_0_raster_config(adev, &raster_config);

if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes)
WREG32(mmPA_SC_RASTER_CONFIG, data);
else
gfx_v6_0_write_harvested_raster_configs(adev, data,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes) {
WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
} else {
gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}

/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
RREG32(mmGC_USER_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].raster_config =
RREG32(mmPA_SC_RASTER_CONFIG);
}
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
@@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
}
*/

static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
u32 bitmap)
{
u32 data;

if (!bitmap)
return;

data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
{
u32 data, mask;

data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

mask = gfx_v6_0_create_bitmask(cu_per_sh);

return ~data & mask;
mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}

static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
u32 se_num, u32 sh_per_se,
u32 cu_per_sh)
static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
{
int i, j, k;
u32 data, mask;
u32 active_cu = 0;

mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
active_cu = gfx_v6_0_get_cu_enabled(adev);

mask = 1;
for (k = 0; k < 16; k++) {
@@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
break;
}
gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
if (adev->gfx.config.max_shader_engines == 2)
gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
adev->gfx.config.gb_addr_config = gb_addr_config;

WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
@@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
#endif
gfx_v6_0_tiling_mode_table_init(adev);

gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se,
adev->gfx.config.max_backends_per_se);
gfx_v6_0_setup_rb(adev);

gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se,
adev->gfx.config.max_cu_per_sh);
gfx_v6_0_setup_spi(adev);

gfx_v6_0_get_cu_info(adev);

@@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}

static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
u32 se, u32 sh)
{

u32 mask = 0, tmp, tmp1;
int i;

mutex_lock(&adev->grbm_idx_mutex);
gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);

tmp &= 0xffff0000;

tmp |= tmp1;
tmp >>= 16;

for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
mask <<= 1;
mask |= 1;
}

return (~tmp) & mask;
}

static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
u32 i, j, k, active_cu_number = 0;
u32 tmp;

u32 mask, counter, cu_bitmap;
u32 tmp = 0;
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);

for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
cu_bitmap = 0;
counter = 0;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
if (counter < 2)
cu_bitmap |= mask;
counter++;
}
mask <<= 1;
}

active_cu_number += counter;
tmp |= (cu_bitmap << (i * 16 + j * 8));
}
}

WREG32(mmRLC_PG_AO_CU_MASK, tmp);
WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number);
tmp = RREG32(mmRLC_MAX_PG_CU);
tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
WREG32(mmRLC_MAX_PG_CU, tmp);
}

static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
@@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
unsigned disable_masks[4 * 2];

memset(cu_info, 0, sizeof(*cu_info));

amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
if (i < 4 && j < 2)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v6_0_get_cu_enabled(adev);
cu_info->bitmap[i][j] = bitmap;

for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
for (k = 0; k < 16; k++) {
if (bitmap & mask) {
if (counter < 2)
ao_bitmap |= mask;
@@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
}
}

gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);

cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
}

@@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);

tmp = RREG32(mmSPI_ARB_PRIORITY);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
WREG32(mmSPI_ARB_PRIORITY, tmp);

mutex_unlock(&adev->grbm_idx_mutex);

udelay(50);

@@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_earlyz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));

tmp = RREG32(mmSPI_ARB_PRIORITY);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
WREG32(mmSPI_ARB_PRIORITY, tmp);

mutex_unlock(&adev->grbm_idx_mutex);

}
@@ -7260,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c
static union {
struct amdgpu_ce_ib_state regular;
struct amdgpu_ce_ib_state_chained_ib chained;
} ce_payload = {0};
} ce_payload = {};

if (ring->adev->virt.chained_ib_support) {
ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload);
@@ -7287,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c
static union {
struct amdgpu_de_ib_state regular;
struct amdgpu_de_ib_state_chained_ib chained;
} de_payload = {0};
} de_payload = {};

gds_addr = csa_addr + 4096;
if (ring->adev->virt.chained_ib_support) {

@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

if (adev->mode_info.num_crtc)
amdgpu_display_set_vga_render_state(adev, false);

gmc_v6_0_mc_stop(adev, &save);

if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v6_0_mc_resume(adev, &save);
amdgpu_display_set_vga_render_state(adev, false);
}

static int gmc_v6_0_mc_init(struct amdgpu_device *adev)

@@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,

if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
kv_dpm_powergate_vce(adev, false);
/* turn the clocks on when encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
if (ret)
return ret;
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
amdgpu_kv_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(1 << pi->vce_boot_level));

kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
/* turn the clocks off when not encoding */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
if (ret)
return ret;
kv_dpm_powergate_vce(adev, true);
}

@@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
struct kv_power_info *pi = kv_get_pi(adev);
int ret;

if (pi->uvd_power_gated == gate)
return;

pi->uvd_power_gated = gate;

if (gate) {
if (pi->caps_uvd_pg) {
/* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
/* shutdown the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
/* XXX: check for errors */
}
/* stop the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
} else {
if (pi->caps_uvd_pg) {
if (pi->caps_uvd_pg)
/* power on the UVD block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
/* re-init the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
/* XXX: check for errors */
}
kv_update_uvd_dpm(adev, gate);

ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
}
}

static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
int ret;

if (pi->vce_power_gated == gate)
return;

pi->vce_power_gated = gate;

if (gate) {
if (pi->caps_vce_pg) {
/* shutdown the VCE block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
/* XXX: check for errors */
/* power off the VCE block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
}
} else {
if (pi->caps_vce_pg) {
/* power on the VCE block */
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
/* re-init the VCE block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
/* XXX: check for errors */
}
}
if (!pi->caps_vce_pg)
return;

if (gate)
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
else
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
@@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle)

kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
kv_dpm_powergate_vce(adev, true);
kv_dpm_powergate_uvd(adev, true);

return 0;
}

@@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{PA_SC_RASTER_CONFIG, false, true},
};

static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
u32 se_num, u32 sh_num,
u32 reg_offset)
static uint32_t si_get_register_value(struct amdgpu_device *adev,
bool indexed, u32 se_num,
u32 sh_num, u32 reg_offset)
{
uint32_t val;
if (indexed) {
uint32_t val;
unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
switch (reg_offset) {
case mmCC_RB_BACKEND_DISABLE:
return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
case mmGC_USER_RB_BACKEND_DISABLE:
return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
case mmPA_SC_RASTER_CONFIG:
return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
}

val = RREG32(reg_offset);
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
val = RREG32(reg_offset);

if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
unsigned idx;

switch (reg_offset) {
case mmGB_ADDR_CONFIG:
return adev->gfx.config.gb_addr_config;
case mmMC_ARB_RAMCFG:
return adev->gfx.config.mc_arb_ramcfg;
case mmGB_TILE_MODE0:
case mmGB_TILE_MODE1:
case mmGB_TILE_MODE2:
case mmGB_TILE_MODE3:
case mmGB_TILE_MODE4:
case mmGB_TILE_MODE5:
case mmGB_TILE_MODE6:
case mmGB_TILE_MODE7:
case mmGB_TILE_MODE8:
case mmGB_TILE_MODE9:
case mmGB_TILE_MODE10:
case mmGB_TILE_MODE11:
case mmGB_TILE_MODE12:
case mmGB_TILE_MODE13:
case mmGB_TILE_MODE14:
case mmGB_TILE_MODE15:
case mmGB_TILE_MODE16:
case mmGB_TILE_MODE17:
case mmGB_TILE_MODE18:
case mmGB_TILE_MODE19:
case mmGB_TILE_MODE20:
case mmGB_TILE_MODE21:
case mmGB_TILE_MODE22:
case mmGB_TILE_MODE23:
case mmGB_TILE_MODE24:
case mmGB_TILE_MODE25:
case mmGB_TILE_MODE26:
case mmGB_TILE_MODE27:
case mmGB_TILE_MODE28:
case mmGB_TILE_MODE29:
case mmGB_TILE_MODE30:
case mmGB_TILE_MODE31:
idx = (reg_offset - mmGB_TILE_MODE0);
return adev->gfx.config.tile_mode_array[idx];
default:
return RREG32(reg_offset);
}
}
}

static int si_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
@@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
continue;

if (!si_allowed_read_registers[i].untouched)
*value = si_allowed_read_registers[i].grbm_indexed ?
si_read_indexed_register(adev, se_num,
sh_num, reg_offset) :
RREG32(reg_offset);
*value = si_get_register_value(adev,
si_allowed_read_registers[i].grbm_indexed,
se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;

@@ -143,8 +143,8 @@
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D

#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003

#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \

@@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)

uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
goto done;

ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
amdgpu_ring_commit(ring);

done:

if (!r)
DRM_INFO("UVD initialized successfully.\n");

@@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;

uvd_v4_2_stop(adev);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);

ring->ready = false;

return 0;
@@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;
u32 tmp;
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;

WREG32(mmUVD_CGC_GATE, 0);
/* set uvd busy */
WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

uvd_v4_2_set_dcm(adev, true);

uvd_v4_2_mc_resume(adev);

/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
mdelay(1);

/* put LMI, VCPU, RBC etc... into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
WREG32(mmUVD_CGC_GATE, 0);

/* take UVD block out of reset */
WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);

/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
(1 << 21) | (1 << 9) | (1 << 20));
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);

/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x203108);

tmp = RREG32(mmUVD_MPC_CNTL);
WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
@@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
WREG32(mmUVD_MPC_SET_ALU, 0);
WREG32(mmUVD_MPC_SET_MUX, 0x88);

/* take all subblocks out of reset, except VCPU */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
uvd_v4_2_mc_resume(adev);

/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

/* enable UMC */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

mdelay(10);

for (i = 0; i < 10; ++i) {
@@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* enable interupt */
WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

/* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

@@ -393,22 +389,57 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
*/
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
/* force RBC into idle state */
uint32_t i, j;
uint32_t status;

WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_STATUS);
if (status & 2)
break;
mdelay(1);
}
if (status & 2)
break;
}

for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_LMI_STATUS);
if (status & 0xf)
break;
mdelay(1);
}
if (status & 0xf)
break;
}

/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
mdelay(1);

/* put VCPU into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_LMI_STATUS);
if (status & 0x240)
break;
mdelay(1);
}
if (status & 0x240)
break;
}

/* disable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 0x0);
WREG32_P(0x3D49, 0, ~(1 << 2));

/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

/* put LMI, VCPU, RBC etc... into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

WREG32(mmUVD_STATUS, 0);

uvd_v4_2_set_dcm(adev, false);
}
@@ -694,8 +725,26 @@ static int uvd_v4_2_set_powergating_state(void *handle,

if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
mdelay(20);
}
}
return 0;
} else {
if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
mdelay(30);
}
}
return uvd_v4_2_start(adev);
}
}

@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
uint32_t tmp;
int r;

r = uvd_v5_0_start(adev);
if (r)
goto done;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);

ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, 3);

amdgpu_ring_commit(ring);

done:
if (!r)
DRM_INFO("UVD initialized successfully.\n");

return r;

}

/**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;

uvd_v5_0_stop(adev);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);

ring->ready = false;

return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)

uvd_v5_0_mc_resume(adev);

amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);

/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)

/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

WREG32(mmUVD_STATUS, 0);
}

/**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
@@ -824,17 +823,12 @@ static int uvd_v5_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;

if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;

if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev);
adev->uvd.is_powergated = true;
} else {
ret = uvd_v5_0_start(adev);
if (ret)
goto out;
adev->uvd.is_powergated = false;
}

out:
@@ -848,7 +842,8 @@ static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)

mutex_lock(&adev->pm.mutex);

if (adev->uvd.is_powergated) {
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
goto out;
}

@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
    uint32_t tmp;
    int r;

    r = uvd_v6_0_start(adev);
    if (r)
        goto done;
    amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
    uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
    uvd_v6_0_enable_mgcg(adev, true);

    ring->ready = true;
    r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring = &adev->uvd.ring;

    uvd_v6_0_stop(adev);
    if (RREG32(mmUVD_STATUS) != 0)
        uvd_v6_0_stop(adev);

    ring->ready = false;

    return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
    lmi_swap_cntl = 0;
    mp_swap_cntl = 0;

    amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
    uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
    uvd_v6_0_enable_mgcg(adev, true);
    uvd_v6_0_mc_resume(adev);

    /* disable interrupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)

    /* Unstall UMC and register bus */
    WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

    WREG32(mmUVD_STATUS, 0);
}

/**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

    if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
        return 0;

    if (enable) {
        /* wait for STATUS to clear */
        if (uvd_v6_0_wait_for_idle(handle))
@@ -1049,19 +1047,14 @@ static int uvd_v6_0_set_powergating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;

    if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
        return 0;

    WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

    if (state == AMD_PG_STATE_GATE) {
        uvd_v6_0_stop(adev);
        adev->uvd.is_powergated = true;
    } else {
        ret = uvd_v6_0_start(adev);
        if (ret)
            goto out;
        adev->uvd.is_powergated = false;
    }

out:
@@ -1075,7 +1068,8 @@ static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)

    mutex_lock(&adev->pm.mutex);

    if (adev->uvd.is_powergated) {
        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
                CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
            DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
            goto out;
        }
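The hw_fini hunk replaces an unconditional stop with one guarded by `RREG32(mmUVD_STATUS) != 0`, i.e. the block is only stopped if the status register says it is still active. A self-contained sketch of that guard pattern (the status variable and names here are illustrative stand-ins, not the driver's registers):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t uvd_status = 0x2;   /* pretend the block is still active */

    static void uvd_stop(void)
    {
        uvd_status = 0;
        puts("stopped");
    }

    /* only stop the block when it is actually running, so an already
     * powered-down block is not poked again */
    static void uvd_fini(void)
    {
        if (uvd_status != 0)
            uvd_stop();
    }

    int main(void)
    {
        uvd_fini();
        uvd_fini();   /* second call is a no-op */
        return 0;
    }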
@@ -42,10 +42,9 @@
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
@@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
    return -ETIMEDOUT;
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
    WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
    u32 tmp;

    tmp = RREG32(mmVCE_CLOCK_GATING_A);
    tmp &= ~0xfff;
    tmp |= ((0 << 0) | (4 << 4));
    tmp |= 0x40000;
    WREG32(mmVCE_CLOCK_GATING_A, tmp);

    tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
    tmp &= ~0xfff;
    tmp |= ((0 << 0) | (4 << 4));
    WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

    tmp = RREG32(mmVCE_CLOCK_GATING_B);
    tmp |= 0x10;
    tmp &= ~0x100000;
    WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
    uint64_t addr = adev->vce.gpu_addr;
    uint32_t size;

    WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
    WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
    WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
    WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

    WREG32(mmVCE_LMI_CTRL, 0x00398000);
    WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
    WREG32(mmVCE_LMI_SWAP_CNTL, 0);
    WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
    WREG32(mmVCE_LMI_VM_CTRL, 0);

    addr += AMDGPU_VCE_FIRMWARE_OFFSET;
    size = VCE_V2_0_FW_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

    addr += size;
    size = VCE_V2_0_STACK_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

    addr += size;
    size = VCE_V2_0_DATA_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

    WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
    WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v2_0_is_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    unsigned i;

    for (i = 0; i < adev->usec_timeout; i++) {
        if (vce_v2_0_is_idle(handle))
            return 0;
    }
    return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
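vce_v2_0_wait_for_idle() polls vce_v2_0_is_idle() up to `adev->usec_timeout` times and returns -ETIMEDOUT if the busy bit never clears. A generic, self-contained sketch of that polling shape (names and the fake countdown are illustrative, not driver API):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define USEC_TIMEOUT 100000

    static int busy_countdown = 3;   /* pretend hardware goes idle after 3 polls */

    static bool block_is_idle(void)
    {
        return --busy_countdown <= 0;
    }

    /* poll until idle or until the timeout budget is exhausted */
    static int wait_for_idle(void)
    {
        unsigned i;

        for (i = 0; i < USEC_TIMEOUT; i++) {
            if (block_is_idle())
                return 0;
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("wait_for_idle() = %d\n", wait_for_idle());
        return 0;
    }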
@@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
    struct amdgpu_ring *ring;
    int r;

    vce_v2_0_mc_resume(adev);

    /* set BUSY flag */
    WREG32_P(mmVCE_STATUS, 1, ~1);

    vce_v2_0_init_cg(adev);
    vce_v2_0_disable_cg(adev);

    vce_v2_0_mc_resume(adev);

    ring = &adev->vce.ring[0];
    WREG32(mmVCE_RB_RPTR, ring->wptr);
    WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
    return 0;
}

static int vce_v2_0_stop(struct amdgpu_device *adev)
{
    int i, j;
    int status;

    if (vce_v2_0_lmi_clean(adev)) {
        DRM_INFO("VCE is not idle\n");
        return 0;
    }
    /*
    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmVCE_FW_REG_STATUS);
            if (!(status & 1))
                break;
            mdelay(1);
        }
        break;
    }
    */
    if (vce_v2_0_wait_for_idle(adev)) {
        DRM_INFO("VCE is busy, can't set clock gating");
        return 0;
    }

    /* Stall UMC and register bus before resetting VCPU */
    WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

    for (i = 0; i < 10; ++i) {
        for (j = 0; j < 100; ++j) {
            status = RREG32(mmVCE_LMI_STATUS);
            if (status & 0x240)
                break;
            mdelay(1);
        }
        break;
    }

    WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

    /* put LMI, VCPU, RBC etc... into reset */
    WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

    WREG32(mmVCE_STATUS, 0);

    return 0;
}

static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
    u32 tmp;

    if (gated) {
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0xe70000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp |= 0xff000000;
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
    } else {
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0xe7;
        tmp &= ~0xe70000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp |= 0x1fe000;
        tmp &= ~0xff000000;
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp |= 0x3fc;
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
    }
}

static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
    u32 orig, tmp;

    /* LMI_MC/LMI_UMC always set in dynamic,
     * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
     */
    tmp = RREG32(mmVCE_CLOCK_GATING_B);
    tmp &= ~0x00060006;

    /* exception: the ECPU, IH, SEM and SYS blocks need to be turned on/off by SW */
    if (gated) {
        tmp |= 0xe10000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
    } else {
        tmp |= 0xe1;
        tmp &= ~0xe10000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
    }

    orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
    tmp &= ~0x1fe000;
    tmp &= ~0xff000000;
    if (tmp != orig)
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

    orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
    tmp &= ~0x3fc;
    if (tmp != orig)
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

    /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
    WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

    if (gated)
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
                                 bool sw_cg)
{
    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
        if (sw_cg)
            vce_v2_0_set_sw_cg(adev, true);
        else
            vce_v2_0_set_dyn_cg(adev, true);
    } else {
        vce_v2_0_disable_cg(adev);

        if (sw_cg)
            vce_v2_0_set_sw_cg(adev, false);
        else
            vce_v2_0_set_dyn_cg(adev, false);
    }
}

static int vce_v2_0_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle)
    int r, i;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = vce_v2_0_start(adev);
    /* this error means the VCPU is not in a running state, so just skip the ring test rather than failing driver init */
    if (r)
        return 0;

    amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
    vce_v2_0_enable_mgcg(adev, true, false);
    for (i = 0; i < adev->vce.num_rings; i++)
        adev->vce.ring[i].ready = false;

@@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle)
    return r;
}

static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
    u32 tmp;

    if (gated) {
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0xe70000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp |= 0xff000000;
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
    } else {
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0xe7;
        tmp &= ~0xe70000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp |= 0x1fe000;
        tmp &= ~0xff000000;
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp |= 0x3fc;
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
    }
}

static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
    if (vce_v2_0_wait_for_idle(adev)) {
        DRM_INFO("VCE is busy, can't set clock gating");
        return;
    }

    WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);

    if (vce_v2_0_lmi_clean(adev)) {
        DRM_INFO("LMI is busy, can't set clock gating");
        return;
    }

    WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
    WREG32_P(mmVCE_SOFT_RESET,
             VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
             ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
    WREG32(mmVCE_STATUS, 0);

    if (gated)
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
    /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
    if (gated) {
        /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
        WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
    } else {
        /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
        WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
    }

    /* set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
    WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);

    /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
    WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

    WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
    if (!gated) {
        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
        mdelay(100);
        WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

        vce_v2_0_firmware_loaded(adev);
        WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
    }
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
    WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
    bool sw_cg = false;

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
        if (sw_cg)
            vce_v2_0_set_sw_cg(adev, true);
        else
            vce_v2_0_set_dyn_cg(adev, true);
    } else {
        vce_v2_0_disable_cg(adev);

        if (sw_cg)
            vce_v2_0_set_sw_cg(adev, false);
        else
            vce_v2_0_set_dyn_cg(adev, false);
    }
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
    u32 tmp;

    tmp = RREG32(mmVCE_CLOCK_GATING_A);
    tmp &= ~0xfff;
    tmp |= ((0 << 0) | (4 << 4));
    tmp |= 0x40000;
    WREG32(mmVCE_CLOCK_GATING_A, tmp);

    tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
    tmp &= ~0xfff;
    tmp |= ((0 << 0) | (4 << 4));
    WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

    tmp = RREG32(mmVCE_CLOCK_GATING_B);
    tmp |= 0x10;
    tmp &= ~0x100000;
    WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
    uint64_t addr = adev->vce.gpu_addr;
    uint32_t size;

    WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
    WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
    WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
    WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

    WREG32(mmVCE_LMI_CTRL, 0x00398000);
    WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
    WREG32(mmVCE_LMI_SWAP_CNTL, 0);
    WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
    WREG32(mmVCE_LMI_VM_CTRL, 0);

    addr += AMDGPU_VCE_FIRMWARE_OFFSET;
    size = VCE_V2_0_FW_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

    addr += size;
    size = VCE_V2_0_STACK_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

    addr += size;
    size = VCE_V2_0_DATA_SIZE;
    WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
    WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

    WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
    WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);

    vce_v2_0_init_cg(adev);
}

static bool vce_v2_0_is_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    unsigned i;

    for (i = 0; i < adev->usec_timeout; i++) {
        if (vce_v2_0_is_idle(handle))
            return 0;
    }
    return -ETIMEDOUT;
}

static int vce_v2_0_soft_reset(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
    return 0;
}

static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
    u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

    if (enable)
        tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
    else
        tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

    WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}


static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
    bool gate = false;
    bool sw_cg = false;

    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_CG_STATE_GATE) ? true : false;


    vce_v2_0_set_bypass_mode(adev, enable);

    if (state == AMD_CG_STATE_GATE)
    if (state == AMD_CG_STATE_GATE) {
        gate = true;
        sw_cg = true;
    }

    vce_v2_0_enable_mgcg(adev, gate);
    vce_v2_0_enable_mgcg(adev, gate, sw_cg);

    return 0;
}
@@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle,
     */
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
        return 0;

    if (state == AMD_PG_STATE_GATE)
        /* XXX do we need a vce_v2_0_stop()? */
        return 0;
        return vce_v2_0_stop(adev);
    else
        return vce_v2_0_start(adev);
}

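The reworked set_clockgating_state derives both flags from the requested state and hands them to the new two-argument vce_v2_0_enable_mgcg(). A self-contained sketch of that mapping (the enum and names mirror the hunk's logic but are local stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    enum cg_state { CG_STATE_GATE, CG_STATE_UNGATE };

    /* GATE selects both gating itself and the software clock-gating path */
    static void pick_cg_mode(enum cg_state state, bool *gate, bool *sw_cg)
    {
        *gate = false;
        *sw_cg = false;
        if (state == CG_STATE_GATE) {
            *gate = true;
            *sw_cg = true;
        }
    }

    int main(void)
    {
        bool gate, sw_cg;

        pick_cg_mode(CG_STATE_GATE, &gate, &sw_cg);
        printf("gate=%d sw_cg=%d\n", gate, sw_cg);   /* gate=1 sw_cg=1 */
        return 0;
    }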
@@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
    struct amdgpu_ring *ring;
    int idx, r;

    vce_v3_0_override_vce_clock_gating(adev, true);
    if (!(adev->flags & AMD_IS_APU))
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

    ring = &adev->vce.ring[0];
    WREG32(mmVCE_RB_RPTR, ring->wptr);
    WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
    int r, i;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    r = vce_v3_0_start(adev);
    if (r)
        return r;
    vce_v3_0_override_vce_clock_gating(adev, true);
    if (!(adev->flags & AMD_IS_APU))
        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

    for (i = 0; i < adev->vce.num_rings; i++)
        adev->vce.ring[i].ready = false;
@@ -514,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
    WREG32(mmVCE_LMI_SWAP_CNTL, 0);
    WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
    WREG32(mmVCE_LMI_VM_CTRL, 0);
    WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

    if (adev->asic_type >= CHIP_STONEY) {
        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
@@ -766,17 +764,14 @@ static int vce_v3_0_set_powergating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;

    if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
        return 0;

    if (state == AMD_PG_STATE_GATE) {
        adev->vce.is_powergated = true;
        /* XXX do we need a vce_v3_0_stop()? */
        ret = vce_v3_0_stop(adev);
        if (ret)
            goto out;
    } else {
        ret = vce_v3_0_start(adev);
        if (ret)
            goto out;
        adev->vce.is_powergated = false;
    }

out:
@@ -790,7 +785,8 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)

    mutex_lock(&adev->pm.mutex);

    if (adev->vce.is_powergated) {
        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
                CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
            DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
            goto out;
        }
@@ -721,6 +721,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
        if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
            /* enable BM */
            pci_set_master(adev->pdev);
            adev->has_hw_reset = true;
            return 0;
        }
        udelay(1);

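vi_gpu_pci_config_reset() now records a successful reset in `adev->has_hw_reset`, so a later init pass can know the ASIC state was wiped. A hedged sketch of how such a flag might be consumed; this need_post logic is illustrative, not the driver's exact implementation:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
        bool has_hw_reset;   /* set once a PCI config reset was performed */
        bool card_posted;    /* whether the VBIOS has initialized the ASIC */
    };

    /* after a hw reset the ASIC state is lost, so a re-post is needed
     * even if the card had been posted before */
    static bool need_post(const struct dev_state *d)
    {
        return d->has_hw_reset || !d->card_posted;
    }

    int main(void)
    {
        struct dev_state d = { .has_hw_reset = true, .card_posted = true };

        printf("need_post = %d\n", need_post(&d));   /* prints 1 */
        return 0;
    }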
@@ -1310,5 +1310,6 @@
#define ixROM_SW_DATA_62 0xc060012c
#define ixROM_SW_DATA_63 0xc0600130
#define ixROM_SW_DATA_64 0xc0600134
#define ixCURRENT_PG_STATUS 0xc020029c

#endif /* SMU_7_0_1_D_H */

@@ -5452,5 +5452,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004

#endif /* SMU_7_0_1_SH_MASK_H */

@@ -1121,5 +1121,6 @@
#define ixROM_SW_DATA_62 0xc060011c
#define ixROM_SW_DATA_63 0xc0600120
#define ixROM_SW_DATA_64 0xc0600124
#define ixCURRENT_PG_STATUS 0xc020029c

#endif /* SMU_7_1_1_D_H */

@@ -4860,5 +4860,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004

#endif /* SMU_7_1_1_SH_MASK_H */

@@ -1271,5 +1271,6 @@
#define ixROM_SW_DATA_62 0xc060011c
#define ixROM_SW_DATA_63 0xc0600120
#define ixROM_SW_DATA_64 0xc0600124
#define ixCURRENT_PG_STATUS 0xc020029c

#endif /* SMU_7_1_2_D_H */

@@ -5830,5 +5830,7 @@
#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004

#endif /* SMU_7_1_2_SH_MASK_H */

@@ -1244,5 +1244,5 @@
#define ixGC_CAC_ACC_CU14 0xc8
#define ixGC_CAC_ACC_CU15 0xc9
#define ixGC_CAC_OVRD_CU 0xe7

#define ixCURRENT_PG_STATUS 0xc020029c
#endif /* SMU_7_1_3_D_H */

@@ -6076,5 +6076,8 @@
#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004


#endif /* SMU_7_1_3_SH_MASK_H */

@@ -171,6 +171,7 @@ struct cgs_firmware_info {
    uint32_t ucode_start_address;

    void *kptr;
    bool is_kicker;
};

struct cgs_mode_info {

@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
    struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

    if (cz_hwmgr->uvd_power_gated == bgate)
        return 0;

    cz_hwmgr->uvd_power_gated = bgate;

    if (bgate) {
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_GATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_PG_STATE_GATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_GATE);
        cz_dpm_update_uvd_dpm(hwmgr, true);
        cz_dpm_powerdown_uvd(hwmgr);
    } else {
        cz_dpm_powerup_uvd(hwmgr);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_PG_STATE_UNGATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        cz_dpm_update_uvd_dpm(hwmgr, false);
    }

@@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
    struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);

    if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_VCEPowerGating)) {
        if (cz_hwmgr->vce_power_gated != bgate) {
            if (bgate) {
                cgs_set_clockgating_state(
                    hwmgr->device,
                    AMD_IP_BLOCK_TYPE_VCE,
                    AMD_CG_STATE_GATE);
                cgs_set_powergating_state(
                    hwmgr->device,
                    AMD_IP_BLOCK_TYPE_VCE,
                    AMD_PG_STATE_GATE);
                cz_enable_disable_vce_dpm(hwmgr, false);
                cz_dpm_powerdown_vce(hwmgr);
                cz_hwmgr->vce_power_gated = true;
            } else {
                cz_dpm_powerup_vce(hwmgr);
                cz_hwmgr->vce_power_gated = false;
                cgs_set_powergating_state(
                    hwmgr->device,
                    AMD_IP_BLOCK_TYPE_VCE,
                    AMD_CG_STATE_UNGATE);
                cgs_set_clockgating_state(
                    hwmgr->device,
                    AMD_IP_BLOCK_TYPE_VCE,
                    AMD_PG_STATE_UNGATE);
                cz_dpm_update_vce_dpm(hwmgr);
                cz_enable_disable_vce_dpm(hwmgr, true);
                return 0;
            }
        }
    if (bgate) {
        cgs_set_powergating_state(
            hwmgr->device,
            AMD_IP_BLOCK_TYPE_VCE,
            AMD_PG_STATE_GATE);
        cgs_set_clockgating_state(
            hwmgr->device,
            AMD_IP_BLOCK_TYPE_VCE,
            AMD_CG_STATE_GATE);
        cz_enable_disable_vce_dpm(hwmgr, false);
        cz_dpm_powerdown_vce(hwmgr);
        cz_hwmgr->vce_power_gated = true;
    } else {
        cz_hwmgr->vce_power_gated = bgate;
        cz_dpm_powerup_vce(hwmgr);
        cz_hwmgr->vce_power_gated = false;
        cgs_set_clockgating_state(
            hwmgr->device,
            AMD_IP_BLOCK_TYPE_VCE,
            AMD_PG_STATE_UNGATE);
        cgs_set_powergating_state(
            hwmgr->device,
            AMD_IP_BLOCK_TYPE_VCE,
            AMD_CG_STATE_UNGATE);
        cz_dpm_update_vce_dpm(hwmgr);
        cz_enable_disable_vce_dpm(hwmgr, !bgate);
        cz_enable_disable_vce_dpm(hwmgr, true);
        return 0;
    }

    if (!cz_hwmgr->vce_power_gated)
        cz_dpm_update_vce_dpm(hwmgr);

    return 0;
}

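Part of what these powergate hunks straighten out is mismatched constants: the old paths handed clock-gating values such as AMD_CG_STATE_UNGATE to cgs_set_powergating_state() and vice versa. Keeping the two state sets as distinct enum types is one way to let the toolchain catch that; a small illustrative sketch (types and names hypothetical, and note that plain C only warns about such mixing with flags like -Wenum-conversion on recent compilers):

    enum cg_state { CG_GATE, CG_UNGATE };
    enum pg_state { PG_GATE, PG_UNGATE };

    static void set_clockgating(enum cg_state s) { (void)s; }
    static void set_powergating(enum pg_state s) { (void)s; }

    int main(void)
    {
        set_clockgating(CG_GATE);
        set_powergating(PG_UNGATE);
        /* set_powergating(CG_UNGATE) would be flagged by -Wenum-conversion */
        return 0;
    }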
@@ -1396,3 +1396,25 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,

    return 0;
}

int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
                           uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
                           uint16_t *load_line)
{
    ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
        (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);

    const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;

    PP_ASSERT_WITH_CODE((NULL != voltage_info),
            "Could not find Voltage Table in BIOS.", return -EINVAL);

    voltage_object = atomctrl_lookup_voltage_type_v3
        (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

    *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
    *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
    *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;

    return 0;
}

@@ -311,5 +311,8 @@ extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_a

extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);

extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
        uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
        uint16_t *load_line);
#endif

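The new helper leans on PP_ASSERT_WITH_CODE, which, as used above, checks a condition and on failure logs a message and runs a recovery statement (here `return -EINVAL`). An illustrative assert-with-action macro in that spirit; this is a stand-in, not the exact powerplay macro:

    #include <stdio.h>

    /* if 'cond' fails, log 'msg' and execute the recovery statement 'code' */
    #define ASSERT_WITH_CODE(cond, msg, code)               \
        do {                                                \
            if (!(cond)) {                                  \
                fprintf(stderr, "%s\n", msg);               \
                code;                                       \
            }                                               \
        } while (0)

    static int lookup(const int *table)
    {
        ASSERT_WITH_CODE(table != NULL,
                "Could not find Voltage Table in BIOS.", return -1);
        return table[0];
    }

    int main(void)
    {
        printf("%d\n", lookup(NULL));   /* logs the message, prints -1 */
        return 0;
    }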
@@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
    data->uvd_power_gated = bgate;

    if (bgate) {
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_GATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_PG_STATE_GATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_GATE);
        smu7_update_uvd_dpm(hwmgr, true);
        smu7_powerdown_uvd(hwmgr);
    } else {
        smu7_powerup_uvd(hwmgr);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        smu7_update_uvd_dpm(hwmgr, false);
    }

@@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

    if (data->vce_power_gated == bgate)
        return 0;

    data->vce_power_gated = bgate;

    if (bgate) {
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_PG_STATE_UNGATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_CG_STATE_GATE);
@@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        smu7_powerdown_vce(hwmgr);
    } else {
        smu7_powerup_vce(hwmgr);
        smu7_update_vce_dpm(hwmgr, false);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_CG_STATE_UNGATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_PG_STATE_UNGATE);
        smu7_update_vce_dpm(hwmgr, false);
    }
    return 0;
}

@@ -1383,6 +1383,15 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
    data->force_pcie_gen = PP_PCIEGenInvalid;
    data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;

    if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) {
        uint8_t tmp1, tmp2;
        uint16_t tmp3 = 0;
        atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
                               &tmp3);
        tmp3 = (tmp3 >> 5) & 0x3;
        data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
    }

    data->fast_watermark_threshold = 100;
    if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
            VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
@@ -2624,6 +2633,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
        smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
        smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
        smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);

        break;
    case AMD_DPM_FORCED_LEVEL_MANUAL:
        hwmgr->dpm_level = level;
@@ -2633,9 +2643,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
        break;
    }

    if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
    if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
    else
    else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);

    return 0;
@@ -4397,16 +4407,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
        if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
            return -EINVAL;
        dep_sclk_table = table_info->vdd_dep_on_sclk;
        for (i = 0; i < dep_sclk_table->count; i++) {
        for (i = 0; i < dep_sclk_table->count; i++)
            clocks->clock[i] = dep_sclk_table->entries[i].clk;
            clocks->count++;
        }
        clocks->count = dep_sclk_table->count;
    } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
        sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
        for (i = 0; i < sclk_table->count; i++) {
        for (i = 0; i < sclk_table->count; i++)
            clocks->clock[i] = sclk_table->entries[i].clk;
            clocks->count++;
        }
        clocks->count = sclk_table->count;
    }

    return 0;
@@ -4440,14 +4448,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
            clocks->clock[i] = dep_mclk_table->entries[i].clk;
            clocks->latency[i] = smu7_get_mem_latency(hwmgr,
                        dep_mclk_table->entries[i].clk);
            clocks->count++;
        }
        clocks->count = dep_mclk_table->count;
    } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
        mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
        for (i = 0; i < mclk_table->count; i++) {
        for (i = 0; i < mclk_table->count; i++)
            clocks->clock[i] = mclk_table->entries[i].clk;
            clocks->count++;
        }
        clocks->count = mclk_table->count;
    }
    return 0;
}

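The phase-shed hunk extracts a two-bit field from the SVI2 load-line value (`(tmp3 >> 5) & 0x3`) and then swaps the two bits with `((tmp3 << 1) | (tmp3 >> 1)) & 0x3`, so 0b01 becomes 0b10 and vice versa while 0b00 and 0b11 map to themselves. A self-contained check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* swap the two low bits of a 2-bit value, as in the phase-shed hunk */
    static uint16_t swap2(uint16_t v)
    {
        return ((v << 1) | (v >> 1)) & 0x3;
    }

    int main(void)
    {
        uint16_t v;

        for (v = 0; v < 4; v++)
            printf("%u -> %u\n", v, swap2(v));
        /* prints: 0->0, 1->2, 2->1, 3->3 */
        return 0;
    }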
@@ -268,7 +268,7 @@ struct smu7_hwmgr {
    uint32_t fast_watermark_threshold;

    /* ---- Phase Shedding ---- */
    bool vddc_phase_shed_control;
    uint8_t vddc_phase_shed_control;

    /* ---- DI/DT ---- */
    struct smu7_display_timing display_timing;

@@ -477,6 +477,151 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
    { 0xFFFFFFFF }
};

static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 * Offset Mask Shift Value Type
 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 */
    /* DIDT_SQ */
    { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x004c, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00d0, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0069, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x0048, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x005f, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x007a, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x001f, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x002d, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x0088, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    /* DIDT_TD */
    { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    /* DIDT_TCP */
    { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
    { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },

    { 0xFFFFFFFF } /* End of list */
};

static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{
@@ -630,7 +775,10 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
    } else if (hwmgr->chip_id == CHIP_POLARIS11) {
        result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
        PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
        result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
        if (hwmgr->smumgr->is_kicker)
            result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
        else
            result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
        PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
    } else if (hwmgr->chip_id == CHIP_POLARIS12) {
        result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);

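DIDTConfig_Polaris11_Kicker follows the gpu_pt_config_reg table convention visible above: each row carries an offset, mask, shift, value and register type, and a row whose offset is 0xFFFFFFFF terminates the list, which smu7_program_pt_config_registers() then walks. A minimal stand-alone walker with the same shape; the struct and loop are illustrative, not the driver's exact code:

    #include <stdint.h>
    #include <stdio.h>

    struct config_reg {
        uint32_t offset;   /* register index; 0xFFFFFFFF ends the table */
        uint32_t mask;
        uint32_t shift;
        uint32_t value;
    };

    static uint32_t regs[4];

    /* apply each row as a masked field update, stopping at the sentinel */
    static void program_registers(const struct config_reg *tbl)
    {
        for (; tbl->offset != 0xFFFFFFFF; tbl++) {
            uint32_t tmp = regs[tbl->offset];

            tmp &= ~tbl->mask;
            tmp |= (tbl->value << tbl->shift) & tbl->mask;
            regs[tbl->offset] = tmp;
        }
    }

    int main(void)
    {
        static const struct config_reg tbl[] = {
            { 0, 0x000000ff, 0, 0x4c },
            { 1, 0x0000ff00, 8, 0xd0 },
            { 0xFFFFFFFF }
        };

        program_registers(tbl);
        printf("reg0=0x%08x reg1=0x%08x\n", regs[0], regs[1]);
        return 0;
    }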
@@ -137,6 +137,7 @@ struct pp_smumgr {
    uint32_t usec_timeout;
    bool reload_fw;
    const struct pp_smumgr_func *smumgr_funcs;
    bool is_kicker;
};

extern int smum_early_init(struct pp_instance *handle);

@@ -494,6 +494,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
    struct phm_ppt_v1_information *table_info =
            (struct phm_ppt_v1_information *)(hwmgr->pptable);
    struct pp_smumgr *smumgr = hwmgr->smumgr;

    state->CcPwrDynRm = 0;
    state->CcPwrDynRm1 = 0;
@@ -502,7 +503,10 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
    state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
            VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);

    state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
    if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
        state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
    else
        state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;

    CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
    CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);

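With vddc_phase_shed_control widened to a two-bit field, the new Polaris12/kicker path computes VddcPhase as `vddc_phase_shed_control ^ 0x3`, which inverts both bits (0 and 3 swap, 1 and 2 swap), while other parts keep the old boolean 0/1 mapping. A quick self-contained check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t ctl;

        /* XOR with 0x3 flips both bits of the 2-bit field: 0<->3, 1<->2 */
        for (ctl = 0; ctl < 4; ctl++)
            printf("%u -> %u\n", ctl, ctl ^ 0x3);
        return 0;
    }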
@@ -533,6 +533,8 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
        cgs_get_firmware_info(smumgr->device,
            smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

    smumgr->is_kicker = info.is_kicker;

    result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

    return result;

@@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");