Merge tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This has two exciting community drivers for ARM Mali accelerators.
  Since ARM has never been open source friendly on the GPU side of the
  house, the community has had to create open source drivers for the
  Mali GPUs. Lima covers the older t4xx and panfrost the newer 6xx/7xx
  series. Well done to all involved and hopefully this will help ARM
  head in the right direction.

  There is also now the ability, if you don't have any of the legacy
  (pre-KMS) drivers enabled, to remove all the pre-KMS support code
  from core drm; this saves 10% or so in code size on my machine.

  i915 also enables Icelake/Elkhart Lake Gen11 GPUs by default, and
  vboxvideo moves out of staging.

  There are also some rcar-du patches which cross over with the media
  tree, but all should be acked by Mauro.

  Summary:

  uapi changes:
   - Colorspace connector property
   - fourcc - new YUV formats
   - timeline sync objects initially merged
   - expose FB_DAMAGE_CLIPS to atomic userspace

  new drivers:
   - vboxvideo: moved out of staging
   - aspeed: ASPEED SoC BMC chip display support
   - lima: ARM Mali4xx GPU acceleration driver support
   - panfrost: ARM Mali6xx/7xx Midgard/Bifrost acceleration driver support

  core:
   - component helper docs
   - unplugging fixes
   - devm device init
   - MIPI/DSI rate control
   - shmem backed gem objects
   - connector, display_info, edid_quirks cleanups
   - dma_buf fence chain support
   - 64-bit dma-fence seqno comparison fixes
   - move initial fb config code to core
   - gem fence array helpers for Lima
   - ability to remove legacy support code if no drivers require it
     (removes 10% of drm.ko size)
   - lease fixes

  ttm:
   - unified DRM_FILE_PAGE_OFFSET handling
   - Account for kernel allocations in kernel zone only

  panel:
   - OSD070T1718-19TS panel support
   - panel-tpo-td028ttec1 backlight support
   - Ronbo RB070D30 MIPI/DSI
   - Feiyang FY07024DI26A30-D MIPI-DSI panel
   - Rocktech jh057n00900 MIPI-DSI panel

  i915:
   - Comet Lake (Gen9) PCI IDs
   - Updated Icelake PCI IDs
   - Elkhartlake (Gen11) support
   - DP MST property additions
   - plane and watermark fixes
   - Icelake port sync and VEBOX disable fixes
   - struct_mutex usage reduction
   - Icelake gamma fix
   - GuC reset fixes
   - make mmap more asynchronous
   - sound display power well race fixes
   - DDI/MIPI-DSI clocks for Icelake
   - Icelake RPS frequency changing support
   - Icelake workarounds

  amdgpu:
   - Use HMM for userptr
   - vega20 experimental smu11 support
   - RAS support for vega20
   - BACO support for vega12 + fixes for vega20
   - reworked IH interrupt handling
   - amdkfd RAS support
   - Freesync improvements
   - initial timeline sync object support
   - DC Z ordering fixes
   - NV12 planes support
   - colorspace properties for planes
   - eDP opts if eDP already initialized

  nouveau:
   - misc fixes

  etnaviv:
   - misc fixes

  msm:
   - GPU zap shader support expansion
   - robustness ABI addition

  exynos:
   - Logging cleanups

  tegra:
   - Shared reset fix
   - CPU cache maintenance fix

  cirrus:
   - driver rewritten using simple helpers

  meson:
   - G12A support

  vmwgfx:
   - Resource dirtying management improvements
   - Userspace logging improvements

  virtio:
   - PRIME fixes

  rockchip:
   - rk3066 hdmi support

  sun4i:
   - DSI burst mode support

  vc4:
   - load tracker to detect underflow

  v3d:
   - v3d v4.2 support

  malidp:
   - initial Mali D71 support in komeda driver

  tfp410:
   - omap related improvement

  omapdrm:
   - drm bridge/panel support
   - drop some omap specific panels

  rcar-du:
   - Display writeback support"

* tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm: (1507 commits)
  drm/msm/a6xx: No zap shader is not an error
  drm/cma-helper: Fix drm_gem_cma_free_object()
  drm: Fix timestamp docs for variable refresh properties.
  drm/komeda: Mark the local functions as static
  drm/komeda: Fixed warning: Function parameter or member not described
  drm/komeda: Expose bus_width to Komeda-CORE
  drm/komeda: Add sysfs attribute: core_id and config_id
  drm: add non-desktop quirk for Valve HMDs
  drm/panfrost: Show stored feature registers
  drm/panfrost: Don't scream about deferred probe
  drm/panfrost: Disable PM on probe failure
  drm/panfrost: Set DMA masks earlier
  drm/panfrost: Add sanity checks to submit IOCTL
  drm/etnaviv: initialize idle mask before querying the HW db
  drm: introduce a capability flag for syncobj timeline support
  drm: report consistent errors when checking syncobj capibility
  drm/nouveau/nouveau: forward error generated while resuming objects tree
  drm/nouveau/fb/ramgk104: fix spelling mistake "sucessfully" -> "successfully"
  drm/nouveau/i2c: Disable i2c bus access after ->fini()
  drm/nouveau: Remove duplicate ACPI_VIDEO_NOTIFY_PROBE definition
  ...
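One of the headline uapi items above is initial timeline sync object support, which amdgpu wires into its CS ioctl further down in this diff via the new AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT/SIGNAL chunks. As a rough, userspace-side sketch of how such a chunk could be built (illustrative only; the helper name is made up and the drm_amdgpu_cs_chunk_syncobj field layout should be checked against the merged amdgpu_drm.h):

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: attach a timeline signal point to an amdgpu CS. */
static void fill_timeline_signal_chunk(struct drm_amdgpu_cs_chunk *chunk,
				       struct drm_amdgpu_cs_chunk_syncobj *dep,
				       uint32_t syncobj_handle,
				       uint64_t point)
{
	dep->handle = syncobj_handle;
	dep->flags = 0;
	dep->point = point;	/* a zero point falls back to binary semantics */

	chunk->chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL;
	chunk->length_dw = sizeof(*dep) / 4;
	chunk->chunk_data = (uintptr_t)dep;
}

The chunk is then passed to DRM_IOCTL_AMDGPU_CS along with the IB chunks; the kernel-side handling of the point/chain is visible in the amdgpu_cs.c hunks later in this diff.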
@@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions

config DRM_GEM_SHMEM_HELPER
bool
depends on DRM
help
Choose this if you need the GEM shmem helper functions

config DRM_VM
bool
depends on DRM && MMU
@@ -225,8 +231,6 @@ config DRM_AMDGPU

source "drivers/gpu/drm/amd/amdgpu/Kconfig"

source "drivers/gpu/drm/amd/lib/Kconfig"

source "drivers/gpu/drm/nouveau/Kconfig"

source "drivers/gpu/drm/i915/Kconfig"
@@ -251,6 +255,9 @@ config DRM_VKMS

If M is selected the module will be called vkms.

config DRM_ATI_PCIGART
bool

source "drivers/gpu/drm/exynos/Kconfig"

source "drivers/gpu/drm/rockchip/Kconfig"
@@ -329,12 +336,21 @@ source "drivers/gpu/drm/tve200/Kconfig"

source "drivers/gpu/drm/xen/Kconfig"

source "drivers/gpu/drm/vboxvideo/Kconfig"

source "drivers/gpu/drm/lima/Kconfig"

source "drivers/gpu/drm/panfrost/Kconfig"

source "drivers/gpu/drm/aspeed/Kconfig"

# Keep legacy drivers last

menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
@@ -3,11 +3,9 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_drv.o \
drm_scatter.o drm_pci.o \
drm_memory.o drm_drv.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -21,11 +19,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_atomic_uapi.o

drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
@@ -37,7 +37,8 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o
drm_atomic_state_helper.o drm_damage_helper.o \
drm_format_helper.o

drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -56,7 +57,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -109,3 +109,7 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

FULL_AMD_PATH=$(src)/..
FULL_AMD_PATH=$(srctree)/$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)

@@ -53,7 +53,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o

# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -83,6 +83,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_smu.h"

#define MAX_GPU_INSTANCE 16

@@ -156,6 +157,8 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -433,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata;
};

struct amdgpu_cs_post_dep {
struct drm_syncobj *syncobj;
struct dma_fence_chain *chain;
u64 point;
};

struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
@@ -462,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;

unsigned num_post_dep_syncobjs;
struct drm_syncobj **post_dep_syncobjs;
unsigned num_post_deps;
struct amdgpu_cs_post_dep *post_deps;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
@@ -702,7 +711,6 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
@@ -825,6 +833,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;

@@ -842,6 +851,9 @@ struct amdgpu_device {
struct amd_powerplay powerplay;
bool pp_force_state_enabled;

/* smu */
struct smu_context smu;

/* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
@@ -922,6 +934,8 @@ struct amdgpu_device {

int asic_reset_res;
struct work_struct xgmi_reset_work;

bool in_baco_reset;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}

uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

switch (type) {
case KGD_ENGINE_PFP:
return adev->gfx.pfp_fw_version;

case KGD_ENGINE_ME:
return adev->gfx.me_fw_version;

case KGD_ENGINE_CE:
return adev->gfx.ce_fw_version;

case KGD_ENGINE_MEC1:
return adev->gfx.mec_fw_version;

case KGD_ENGINE_MEC2:
return adev->gfx.mec2_fw_version;

case KGD_ENGINE_RLC:
return adev->gfx.rlc_fw_version;

case KGD_ENGINE_SDMA1:
return adev->sdma.instance[0].fw_version;

case KGD_ENGINE_SDMA2:
return adev->sdma.instance[1].fw_version;

default:
return 0;
}

return 0;
}

void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info)
{
@@ -640,4 +677,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
#endif
@@ -81,6 +81,18 @@ struct amdgpu_kfd_dev {
uint64_t vram_used;
};

enum kgd_engine_type {
KGD_ENGINE_PFP = 1,
KGD_ENGINE_ME,
KGD_ENGINE_CE,
KGD_ENGINE_MEC1,
KGD_ENGINE_MEC2,
KGD_ENGINE_RLC,
KGD_ENGINE_SDMA1,
KGD_ENGINE_SDMA2,
KGD_ENGINE_MAX
};

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
@@ -230,5 +244,6 @@ int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);

#endif /* AMDGPU_AMDKFD_H_INCLUDED */
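With this change the firmware-version query becomes a common helper driven by the new kgd_engine_type enum, returning the cached *_fw_version fields instead of going through the per-ASIC get_fw_version() kfd2kgd callbacks that the following hunks delete. A minimal sketch of a caller (the wrapper below is hypothetical, not part of this patch):

/* Hypothetical helper, for illustration only: report the MEC1 microcode
 * version through the new common amdgpu_amdkfd_get_fw_version().
 */
static void example_log_mec_fw_version(struct kgd_dev *kgd)
{
	uint32_t ver = amdgpu_amdkfd_get_fw_version(kgd, KGD_ENGINE_MEC1);

	if (ver)
		pr_info("MEC1 firmware version: %u\n", ver);
	else
		pr_info("MEC1 firmware version unavailable\n");
}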
@@ -22,14 +22,12 @@
|
||||
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/mmu_context.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "cikd.h"
|
||||
#include "cik_sdma.h"
|
||||
#include "amdgpu_ucode.h"
|
||||
#include "gfx_v7_0.h"
|
||||
#include "gca/gfx_7_2_d.h"
|
||||
#include "gca/gfx_7_2_enum.h"
|
||||
@@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
|
||||
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
|
||||
uint8_t vmid);
|
||||
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
|
||||
static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
uint64_t va, uint32_t vmid);
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
@@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
|
||||
.address_watch_get_offset = kgd_address_watch_get_offset,
|
||||
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
|
||||
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
|
||||
.get_fw_version = get_fw_version,
|
||||
.set_scratch_backing_va = set_scratch_backing_va,
|
||||
.get_tile_config = get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
@@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
unlock_srbm(kgd);
|
||||
}
|
||||
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
|
||||
const union amdgpu_firmware_header *hdr;
|
||||
|
||||
switch (type) {
|
||||
case KGD_ENGINE_PFP:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.pfp_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_ME:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.me_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_CE:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.ce_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC1:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.mec_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC2:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.mec2_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_RLC:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.rlc_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA1:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->sdma.instance[0].fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA2:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->sdma.instance[1].fw->data;
|
||||
break;
|
||||
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (hdr == NULL)
|
||||
return 0;
|
||||
|
||||
/* Only 12 bit in use*/
|
||||
return hdr->common.ucode_version;
|
||||
}
|
||||
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
uint64_t page_table_base)
|
||||
{
|
||||
|
@@ -23,12 +23,10 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/mmu_context.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_ucode.h"
|
||||
#include "gfx_v8_0.h"
|
||||
#include "gca/gfx_8_0_sh_mask.h"
|
||||
#include "gca/gfx_8_0_d.h"
|
||||
@@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
|
||||
uint8_t vmid);
|
||||
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
|
||||
uint8_t vmid);
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
|
||||
static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
uint64_t va, uint32_t vmid);
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
@@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
|
||||
get_atc_vmid_pasid_mapping_pasid,
|
||||
.get_atc_vmid_pasid_mapping_valid =
|
||||
get_atc_vmid_pasid_mapping_valid,
|
||||
.get_fw_version = get_fw_version,
|
||||
.set_scratch_backing_va = set_scratch_backing_va,
|
||||
.get_tile_config = get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
@@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
unlock_srbm(kgd);
|
||||
}
|
||||
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
|
||||
const union amdgpu_firmware_header *hdr;
|
||||
|
||||
switch (type) {
|
||||
case KGD_ENGINE_PFP:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.pfp_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_ME:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.me_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_CE:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.ce_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC1:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.mec_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC2:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.mec2_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_RLC:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->gfx.rlc_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA1:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->sdma.instance[0].fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA2:
|
||||
hdr = (const union amdgpu_firmware_header *)
|
||||
adev->sdma.instance[1].fw->data;
|
||||
break;
|
||||
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (hdr == NULL)
|
||||
return 0;
|
||||
|
||||
/* Only 12 bit in use*/
|
||||
return hdr->common.ucode_version;
|
||||
}
|
||||
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
uint64_t page_table_base)
|
||||
{
|
||||
|
@@ -25,12 +25,10 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/mmu_context.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_ucode.h"
|
||||
#include "soc15_hw_ip.h"
|
||||
#include "gc/gc_9_0_offset.h"
|
||||
#include "gc/gc_9_0_sh_mask.h"
|
||||
@@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
|
||||
uint8_t vmid);
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
uint64_t page_table_base);
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
|
||||
static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
uint64_t va, uint32_t vmid);
|
||||
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
|
||||
@@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
|
||||
get_atc_vmid_pasid_mapping_pasid,
|
||||
.get_atc_vmid_pasid_mapping_valid =
|
||||
get_atc_vmid_pasid_mapping_valid,
|
||||
.get_fw_version = get_fw_version,
|
||||
.set_scratch_backing_va = set_scratch_backing_va,
|
||||
.get_tile_config = amdgpu_amdkfd_get_tile_config,
|
||||
.set_vm_context_page_table_base = set_vm_context_page_table_base,
|
||||
@@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
|
||||
*/
|
||||
}
|
||||
|
||||
/* FIXME: Does this need to be ASIC-specific code? */
|
||||
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
|
||||
const union amdgpu_firmware_header *hdr;
|
||||
|
||||
switch (type) {
|
||||
case KGD_ENGINE_PFP:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_ME:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_CE:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC1:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_MEC2:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_RLC:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA1:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
|
||||
break;
|
||||
|
||||
case KGD_ENGINE_SDMA2:
|
||||
hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
|
||||
break;
|
||||
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (hdr == NULL)
|
||||
return 0;
|
||||
|
||||
/* Only 12 bit in use*/
|
||||
return hdr->common.ucode_version;
|
||||
}
|
||||
|
||||
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
|
||||
uint64_t page_table_base)
|
||||
{
|
||||
|
@@ -410,15 +410,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
|
||||
if (p_bo_va_entry)
|
||||
*p_bo_va_entry = bo_va_entry;
|
||||
|
||||
/* Allocate new page tables if needed and validate
|
||||
* them.
|
||||
*/
|
||||
ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
|
||||
if (ret) {
|
||||
pr_err("Failed to allocate pts, err=%d\n", ret);
|
||||
goto err_alloc_pts;
|
||||
}
|
||||
|
||||
/* Allocate validate page tables if needed */
|
||||
ret = vm_validate_pt_pd_bos(vm);
|
||||
if (ret) {
|
||||
pr_err("validate_pt_pd_bos() failed\n");
|
||||
@@ -741,13 +733,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
|
||||
struct amdgpu_sync *sync)
|
||||
{
|
||||
int ret;
|
||||
struct amdgpu_vm *vm;
|
||||
struct amdgpu_bo_va *bo_va;
|
||||
struct amdgpu_bo *bo;
|
||||
|
||||
bo_va = entry->bo_va;
|
||||
vm = bo_va->base.vm;
|
||||
bo = bo_va->base.bo;
|
||||
struct amdgpu_bo_va *bo_va = entry->bo_va;
|
||||
|
||||
/* Update the page tables */
|
||||
ret = amdgpu_vm_bo_update(adev, bo_va, false);
|
||||
@@ -906,7 +892,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
|
||||
pr_err("validate_pt_pd_bos() failed\n");
|
||||
goto validate_pd_fail;
|
||||
}
|
||||
amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
|
||||
ret = amdgpu_bo_sync_wait(vm->root.base.bo,
|
||||
AMDGPU_FENCE_OWNER_KFD, false);
|
||||
if (ret)
|
||||
goto wait_pd_fail;
|
||||
amdgpu_bo_fence(vm->root.base.bo,
|
||||
|
@@ -28,8 +28,6 @@
|
||||
#include "atom.h"
|
||||
#include "atombios.h"
|
||||
|
||||
#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
|
||||
|
||||
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
|
||||
{
|
||||
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
@@ -238,10 +236,71 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if vbios enabled ecc by default, if umc info table is available
|
||||
* or false if ecc is not enabled or umc info table is not available
|
||||
*/
|
||||
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_mode_info *mode_info = &adev->mode_info;
|
||||
int index;
|
||||
u16 data_offset, size;
|
||||
union umc_info *umc_info;
|
||||
u8 frev, crev;
|
||||
bool ecc_default_enabled = false;
|
||||
|
||||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
umc_info);
|
||||
|
||||
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
|
||||
index, &size, &frev, &crev, &data_offset)) {
|
||||
/* support umc_info 3.1+ */
|
||||
if ((frev == 3 && crev >= 1) || (frev > 3)) {
|
||||
umc_info = (union umc_info *)
|
||||
(mode_info->atom_context->bios + data_offset);
|
||||
ecc_default_enabled =
|
||||
(le32_to_cpu(umc_info->v31.umc_config) &
|
||||
UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
|
||||
}
|
||||
}
|
||||
|
||||
return ecc_default_enabled;
|
||||
}
|
||||
|
||||
union firmware_info {
|
||||
struct atom_firmware_info_v3_1 v31;
|
||||
};
|
||||
|
||||
/*
|
||||
* Return true if vbios supports sram ecc or false if not
|
||||
*/
|
||||
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_mode_info *mode_info = &adev->mode_info;
|
||||
int index;
|
||||
u16 data_offset, size;
|
||||
union firmware_info *firmware_info;
|
||||
u8 frev, crev;
|
||||
bool sram_ecc_supported = false;
|
||||
|
||||
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
|
||||
firmwareinfo);
|
||||
|
||||
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
|
||||
index, &size, &frev, &crev, &data_offset)) {
|
||||
/* support firmware_info 3.1 + */
|
||||
if ((frev == 3 && crev >=1) || (frev > 3)) {
|
||||
firmware_info = (union firmware_info *)
|
||||
(mode_info->atom_context->bios + data_offset);
|
||||
sram_ecc_supported =
|
||||
(le32_to_cpu(firmware_info->v31.firmware_capability) &
|
||||
ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
|
||||
}
|
||||
}
|
||||
|
||||
return sram_ecc_supported;
|
||||
}
|
||||
|
||||
union smu_info {
|
||||
struct atom_smu_info_v3_1 v31;
|
||||
};
|
||||
@@ -346,11 +405,11 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
|
||||
(mode_info->atom_context->bios + data_offset);
|
||||
switch (crev) {
|
||||
case 4:
|
||||
adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
|
||||
adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
|
||||
adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
|
||||
adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
|
||||
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
|
||||
adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
|
||||
adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
|
||||
adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
|
||||
adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
|
||||
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
|
||||
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
|
||||
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
|
||||
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
|
||||
|
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_ATOMFIRMWARE_H__
#define __AMDGPU_ATOMFIRMWARE_H__

#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))

bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
@@ -31,5 +33,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);

#endif
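The two new vbios queries declared here let the driver ask whether ECC is enabled for VRAM and whether the firmware advertises SRAM ECC; the new RAS support in this series is the intended consumer. A rough sketch of the kind of gating a caller might do (illustrative only, not the actual amdgpu_ras_init() logic):

/* Illustrative only: decide whether RAS error sources are worth
 * enabling, based on the new atomfirmware capability queries.
 */
static bool example_ras_hw_capable(struct amdgpu_device *adev)
{
	bool vram_ecc = amdgpu_atomfirmware_mem_ecc_supported(adev);
	bool sram_ecc = amdgpu_atomfirmware_sram_ecc_supported(adev);

	dev_info(adev->dev, "RAS: vram ecc %s, sram ecc %s\n",
		 vram_ecc ? "enabled" : "disabled",
		 sram_ecc ? "supported" : "not supported");

	return vram_ecc || sram_ecc;
}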
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
|
||||
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
|
||||
break;
|
||||
|
||||
default:
|
||||
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
|
||||
ttm_eu_backoff_reservation(&parser->ticket,
|
||||
&parser->validated);
|
||||
|
||||
for (i = 0; i < parser->num_post_dep_syncobjs; i++)
|
||||
drm_syncobj_put(parser->post_dep_syncobjs[i]);
|
||||
kfree(parser->post_dep_syncobjs);
|
||||
for (i = 0; i < parser->num_post_deps; i++) {
|
||||
drm_syncobj_put(parser->post_deps[i].syncobj);
|
||||
kfree(parser->post_deps[i].chain);
|
||||
}
|
||||
kfree(parser->post_deps);
|
||||
|
||||
dma_fence_put(parser->fence);
|
||||
|
||||
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
|
||||
}
|
||||
|
||||
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
|
||||
uint32_t handle)
|
||||
uint32_t handle, u64 point,
|
||||
u64 flags)
|
||||
{
|
||||
int r;
|
||||
struct dma_fence *fence;
|
||||
r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
|
||||
if (r)
|
||||
int r;
|
||||
|
||||
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
|
||||
if (r) {
|
||||
DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
|
||||
handle, point, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
|
||||
dma_fence_put(fence);
|
||||
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
|
||||
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
|
||||
struct amdgpu_cs_chunk *chunk)
|
||||
{
|
||||
struct drm_amdgpu_cs_chunk_sem *deps;
|
||||
unsigned num_deps;
|
||||
int i, r;
|
||||
struct drm_amdgpu_cs_chunk_sem *deps;
|
||||
|
||||
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
|
||||
num_deps = chunk->length_dw * 4 /
|
||||
sizeof(struct drm_amdgpu_cs_chunk_sem);
|
||||
|
||||
for (i = 0; i < num_deps; ++i) {
|
||||
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
|
||||
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
|
||||
0, 0);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
|
||||
struct amdgpu_cs_chunk *chunk)
|
||||
{
|
||||
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
|
||||
unsigned num_deps;
|
||||
int i, r;
|
||||
|
||||
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
|
||||
num_deps = chunk->length_dw * 4 /
|
||||
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
|
||||
for (i = 0; i < num_deps; ++i) {
|
||||
r = amdgpu_syncobj_lookup_and_add_to_sync(p,
|
||||
syncobj_deps[i].handle,
|
||||
syncobj_deps[i].point,
|
||||
syncobj_deps[i].flags);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
|
||||
struct amdgpu_cs_chunk *chunk)
|
||||
{
|
||||
struct drm_amdgpu_cs_chunk_sem *deps;
|
||||
unsigned num_deps;
|
||||
int i;
|
||||
struct drm_amdgpu_cs_chunk_sem *deps;
|
||||
|
||||
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
|
||||
num_deps = chunk->length_dw * 4 /
|
||||
sizeof(struct drm_amdgpu_cs_chunk_sem);
|
||||
|
||||
p->post_dep_syncobjs = kmalloc_array(num_deps,
|
||||
sizeof(struct drm_syncobj *),
|
||||
GFP_KERNEL);
|
||||
p->num_post_dep_syncobjs = 0;
|
||||
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
|
||||
GFP_KERNEL);
|
||||
p->num_post_deps = 0;
|
||||
|
||||
if (!p->post_dep_syncobjs)
|
||||
if (!p->post_deps)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
for (i = 0; i < num_deps; ++i) {
|
||||
p->post_deps[i].syncobj =
|
||||
drm_syncobj_find(p->filp, deps[i].handle);
|
||||
if (!p->post_deps[i].syncobj)
|
||||
return -EINVAL;
|
||||
p->post_deps[i].chain = NULL;
|
||||
p->post_deps[i].point = 0;
|
||||
p->num_post_deps++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
|
||||
struct amdgpu_cs_chunk
|
||||
*chunk)
|
||||
{
|
||||
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
|
||||
unsigned num_deps;
|
||||
int i;
|
||||
|
||||
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
|
||||
num_deps = chunk->length_dw * 4 /
|
||||
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
|
||||
|
||||
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
|
||||
GFP_KERNEL);
|
||||
p->num_post_deps = 0;
|
||||
|
||||
if (!p->post_deps)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < num_deps; ++i) {
|
||||
p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
|
||||
if (!p->post_dep_syncobjs[i])
|
||||
struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
|
||||
|
||||
dep->chain = NULL;
|
||||
if (syncobj_deps[i].point) {
|
||||
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
|
||||
if (!dep->chain)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dep->syncobj = drm_syncobj_find(p->filp,
|
||||
syncobj_deps[i].handle);
|
||||
if (!dep->syncobj) {
|
||||
kfree(dep->chain);
|
||||
return -EINVAL;
|
||||
p->num_post_dep_syncobjs++;
|
||||
}
|
||||
dep->point = syncobj_deps[i].point;
|
||||
p->num_post_deps++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
|
||||
|
||||
chunk = &p->chunks[i];
|
||||
|
||||
if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
|
||||
chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
|
||||
switch (chunk->chunk_id) {
|
||||
case AMDGPU_CHUNK_ID_DEPENDENCIES:
|
||||
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
|
||||
r = amdgpu_cs_process_fence_dep(p, chunk);
|
||||
if (r)
|
||||
return r;
|
||||
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
|
||||
break;
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
|
||||
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
|
||||
if (r)
|
||||
return r;
|
||||
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
|
||||
break;
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
|
||||
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
|
||||
if (r)
|
||||
return r;
|
||||
break;
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
|
||||
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
|
||||
if (r)
|
||||
return r;
|
||||
break;
|
||||
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
|
||||
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
|
||||
if (r)
|
||||
return r;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
|
||||
drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
|
||||
for (i = 0; i < p->num_post_deps; ++i) {
|
||||
if (p->post_deps[i].chain && p->post_deps[i].point) {
|
||||
drm_syncobj_add_point(p->post_deps[i].syncobj,
|
||||
p->post_deps[i].chain,
|
||||
p->fence, p->post_deps[i].point);
|
||||
p->post_deps[i].chain = NULL;
|
||||
} else {
|
||||
drm_syncobj_replace_fence(p->post_deps[i].syncobj,
|
||||
p->fence);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
||||
|
@@ -92,15 +92,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
|
||||
size);
|
||||
if (r) {
|
||||
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
|
||||
amdgpu_vm_bo_rmv(adev, *bo_va);
|
||||
ttm_eu_backoff_reservation(&ticket, &list);
|
||||
return r;
|
||||
}
|
||||
|
||||
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
|
||||
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
|
||||
AMDGPU_PTE_EXECUTABLE);
|
||||
|
@@ -26,6 +26,7 @@
|
||||
#include <drm/drm_auth.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_sched.h"
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
#define to_amdgpu_ctx_entity(e) \
|
||||
container_of((e), struct amdgpu_ctx_entity, entity)
|
||||
@@ -344,6 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
|
||||
{
|
||||
struct amdgpu_ctx *ctx;
|
||||
struct amdgpu_ctx_mgr *mgr;
|
||||
uint32_t ras_counter;
|
||||
|
||||
if (!fpriv)
|
||||
return -EINVAL;
|
||||
@@ -368,6 +370,21 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
|
||||
if (atomic_read(&ctx->guilty))
|
||||
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
|
||||
|
||||
/*query ue count*/
|
||||
ras_counter = amdgpu_ras_query_error_count(adev, false);
|
||||
/*ras counter is monotonic increasing*/
|
||||
if (ras_counter != ctx->ras_counter_ue) {
|
||||
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
|
||||
ctx->ras_counter_ue = ras_counter;
|
||||
}
|
||||
|
||||
/*query ce count*/
|
||||
ras_counter = amdgpu_ras_query_error_count(adev, true);
|
||||
if (ras_counter != ctx->ras_counter_ce) {
|
||||
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
|
||||
ctx->ras_counter_ce = ras_counter;
|
||||
}
|
||||
|
||||
mutex_unlock(&mgr->lock);
|
||||
return 0;
|
||||
}
|
||||
@@ -541,32 +558,26 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
|
||||
idr_init(&mgr->ctx_handles);
|
||||
}
|
||||
|
||||
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
|
||||
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
|
||||
{
|
||||
unsigned num_entities = amdgput_ctx_total_num_entities();
|
||||
struct amdgpu_ctx *ctx;
|
||||
struct idr *idp;
|
||||
uint32_t id, i;
|
||||
long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
|
||||
|
||||
idp = &mgr->ctx_handles;
|
||||
|
||||
mutex_lock(&mgr->lock);
|
||||
idr_for_each_entry(idp, ctx, id) {
|
||||
|
||||
if (!ctx->adev) {
|
||||
mutex_unlock(&mgr->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_entities; i++) {
|
||||
struct drm_sched_entity *entity;
|
||||
|
||||
entity = &ctx->entities[0][i].entity;
|
||||
max_wait = drm_sched_entity_flush(entity, max_wait);
|
||||
timeout = drm_sched_entity_flush(entity, timeout);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&mgr->lock);
|
||||
return timeout;
|
||||
}
|
||||
|
||||
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
|
||||
@@ -579,10 +590,6 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
|
||||
idp = &mgr->ctx_handles;
|
||||
|
||||
idr_for_each_entry(idp, ctx, id) {
|
||||
|
||||
if (!ctx->adev)
|
||||
return;
|
||||
|
||||
if (kref_read(&ctx->refcount) != 1) {
|
||||
DRM_ERROR("ctx %p is still alive\n", ctx);
|
||||
continue;
|
||||
|
@@ -49,6 +49,8 @@ struct amdgpu_ctx {
|
||||
enum drm_sched_priority override_priority;
|
||||
struct mutex lock;
|
||||
atomic_t guilty;
|
||||
uint32_t ras_counter_ce;
|
||||
uint32_t ras_counter_ue;
|
||||
};
|
||||
|
||||
struct amdgpu_ctx_mgr {
|
||||
@@ -82,7 +84,7 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
|
||||
|
||||
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
|
||||
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
|
||||
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
|
||||
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
|
||||
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
|
||||
|
||||
#endif
|
||||
|
@@ -568,10 +568,9 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
|
||||
idx = *pos >> 2;
|
||||
|
||||
valuesize = sizeof(values);
|
||||
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
|
||||
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
|
||||
else
|
||||
return -EINVAL;
|
||||
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (size > valuesize)
|
||||
return -EINVAL;
|
||||
|
@@ -60,6 +60,7 @@
|
||||
#include "amdgpu_pm.h"
|
||||
|
||||
#include "amdgpu_xgmi.h"
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
|
||||
@@ -1506,7 +1507,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
|
||||
adev->pm.pp_feature = amdgpu_pp_feature_mask;
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
|
||||
|
||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
|
||||
@@ -1638,6 +1641,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
r = amdgpu_ras_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||
if (!adev->ip_blocks[i].status.valid)
|
||||
continue;
|
||||
@@ -1681,6 +1688,13 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
r = amdgpu_ib_pool_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
|
||||
goto init_failed;
|
||||
}
|
||||
|
||||
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
|
||||
if (r)
|
||||
goto init_failed;
|
||||
@@ -1869,6 +1883,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
amdgpu_ras_pre_fini(adev);
|
||||
|
||||
if (adev->gmc.xgmi.num_physical_nodes > 1)
|
||||
amdgpu_xgmi_remove_device(adev);
|
||||
|
||||
@@ -1917,6 +1933,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
|
||||
amdgpu_free_static_csa(&adev->virt.csa_obj);
|
||||
amdgpu_device_wb_fini(adev);
|
||||
amdgpu_device_vram_scratch_fini(adev);
|
||||
amdgpu_ib_pool_fini(adev);
|
||||
}
|
||||
|
||||
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
|
||||
@@ -1937,6 +1954,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
|
||||
adev->ip_blocks[i].status.late_initialized = false;
|
||||
}
|
||||
|
||||
amdgpu_ras_fini(adev);
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
if (amdgpu_virt_release_full_gpu(adev, false))
|
||||
DRM_ERROR("failed to release exclusive mode on fini\n");
|
||||
@@ -1999,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
|
||||
r = amdgpu_device_enable_mgpu_fan_boost();
|
||||
if (r)
|
||||
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
|
||||
|
||||
/*set to low pstate by default */
|
||||
amdgpu_xgmi_set_pstate(adev, 0);
|
||||
|
||||
}
|
||||
|
||||
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
|
||||
@@ -2369,7 +2392,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
|
||||
|
||||
adev->asic_reset_res = amdgpu_asic_reset(adev);
|
||||
if (adev->asic_reset_res)
|
||||
DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
|
||||
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
|
||||
adev->asic_reset_res, adev->ddev->unique);
|
||||
}
|
||||
|
||||
@@ -2448,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
mutex_init(&adev->virt.vf_errors.lock);
|
||||
hash_init(adev->mn_hash);
|
||||
mutex_init(&adev->lock_reset);
|
||||
mutex_init(&adev->virt.dpm_mutex);
|
||||
|
||||
amdgpu_device_check_arguments(adev);
|
||||
|
||||
@@ -2642,13 +2666,6 @@ fence_driver_init:
|
||||
/* Get a log2 for easy divisions. */
|
||||
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
|
||||
|
||||
r = amdgpu_ib_pool_init(adev);
|
||||
if (r) {
|
||||
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
|
||||
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
amdgpu_fbdev_init(adev);
|
||||
|
||||
r = amdgpu_pm_sysfs_init(adev);
|
||||
@@ -2694,6 +2711,9 @@ fence_driver_init:
|
||||
goto failed;
|
||||
}
|
||||
|
||||
/* must succeed. */
|
||||
amdgpu_ras_post_init(adev);
|
||||
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
@@ -2726,7 +2746,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
||||
else
|
||||
drm_atomic_helper_shutdown(adev->ddev);
|
||||
}
|
||||
amdgpu_ib_pool_fini(adev);
|
||||
amdgpu_fence_driver_fini(adev);
|
||||
amdgpu_pm_sysfs_fini(adev);
|
||||
amdgpu_fbdev_fini(adev);
|
||||
@@ -3225,6 +3244,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
amdgpu_amdkfd_pre_reset(adev);
|
||||
|
||||
/* Resume IP prior to SMC */
|
||||
r = amdgpu_device_ip_reinit_early_sriov(adev);
|
||||
if (r)
|
||||
@@ -3244,6 +3265,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
||||
|
||||
amdgpu_irq_gpu_reset_resume_helper(adev);
|
||||
r = amdgpu_ib_ring_tests(adev);
|
||||
amdgpu_amdkfd_post_reset(adev);
|
||||
|
||||
error:
|
||||
amdgpu_virt_init_data_exchange(adev);
|
||||
@@ -3376,7 +3398,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
|
||||
r = amdgpu_asic_reset(tmp_adev);
|
||||
|
||||
if (r) {
|
||||
DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
|
||||
DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
|
||||
r, tmp_adev->ddev->unique);
|
||||
break;
|
||||
}
|
||||
@@ -3393,6 +3415,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(tmp_adev, device_list_handle,
|
||||
gmc.xgmi.head) {
|
||||
amdgpu_ras_reserve_bad_pages(tmp_adev);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3411,7 +3438,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
|
||||
|
||||
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
|
||||
if (vram_lost) {
|
||||
DRM_ERROR("VRAM is lost!\n");
|
||||
DRM_INFO("VRAM is lost due to GPU reset!\n");
|
||||
atomic_inc(&tmp_adev->vram_lost_counter);
|
||||
}
|
||||
|
||||
|
@@ -904,3 +904,19 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
|
||||
{
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_get_sclk(&adev->smu, low);
|
||||
else
|
||||
return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
|
||||
}
|
||||
|
||||
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
|
||||
{
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_get_mclk(&adev->smu, low);
|
||||
else
|
||||
return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
|
||||
}
|
||||
|
@@ -260,9 +260,6 @@ enum amdgpu_pcie_gen {
|
||||
#define amdgpu_dpm_enable_bapm(adev, e) \
|
||||
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
|
||||
|
||||
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
|
||||
((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
|
||||
|
||||
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
|
||||
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
|
||||
|
||||
@@ -281,18 +278,18 @@ enum amdgpu_pcie_gen {
|
||||
#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
|
||||
((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
|
||||
|
||||
#define amdgpu_dpm_get_sclk(adev, l) \
|
||||
((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
|
||||
|
||||
#define amdgpu_dpm_get_mclk(adev, l) \
|
||||
((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
|
||||
|
||||
#define amdgpu_dpm_force_performance_level(adev, l) \
|
||||
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
|
||||
|
||||
#define amdgpu_dpm_get_current_power_state(adev) \
|
||||
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
|
||||
|
||||
#define amdgpu_smu_get_current_power_state(adev) \
|
||||
((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
|
||||
|
||||
#define amdgpu_smu_set_power_state(adev) \
|
||||
((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
|
||||
|
||||
#define amdgpu_dpm_get_pp_num_states(adev, data) \
|
||||
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
|
||||
|
||||
@@ -448,6 +445,9 @@ struct amdgpu_pm {
|
||||
uint32_t smu_prv_buffer_size;
|
||||
struct amdgpu_bo *smu_prv_buffer;
|
||||
bool ac_power;
|
||||
/* powerplay feature */
|
||||
uint32_t pp_feature;
|
||||
|
||||
};
|
||||
|
||||
#define R600_SSTU_DFLT 0
|
||||
@@ -486,6 +486,8 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
|
||||
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
|
||||
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
|
||||
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
|
||||
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
|
||||
void *data, uint32_t *size);
|
||||
|
||||
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
|
||||
|
||||
@@ -504,4 +506,8 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
|
||||
struct amd_vce_state*
|
||||
amdgpu_get_vce_clock_state(void *handle, u32 idx);
|
||||
|
||||
extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
|
||||
|
||||
extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
|
||||
|
||||
#endif
|
||||
|
@@ -74,9 +74,11 @@
|
||||
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
|
||||
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
|
||||
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
|
||||
* - 3.31.0 - Add support for per-flip tiling attribute changes with DC
|
||||
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
|
||||
*/
|
||||
#define KMS_DRIVER_MAJOR 3
|
||||
#define KMS_DRIVER_MINOR 30
|
||||
#define KMS_DRIVER_MINOR 32
|
||||
#define KMS_DRIVER_PATCHLEVEL 0
|
||||
|
||||
int amdgpu_vram_limit = 0;
|
||||
@@ -117,8 +119,8 @@ uint amdgpu_pg_mask = 0xffffffff;
|
||||
uint amdgpu_sdma_phase_quantum = 32;
|
||||
char *amdgpu_disable_cu = NULL;
|
||||
char *amdgpu_virtual_display = NULL;
|
||||
/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
|
||||
uint amdgpu_pp_feature_mask = 0xfffd3fff;
|
||||
/* OverDrive(bit 14) disabled by default*/
|
||||
uint amdgpu_pp_feature_mask = 0xffffbfff;
|
||||
int amdgpu_ngg = 0;
|
||||
int amdgpu_prim_buf_per_se = 0;
|
||||
int amdgpu_pos_buf_per_se = 0;
|
||||
@@ -136,6 +138,8 @@ uint amdgpu_dc_feature_mask = 0;
|
||||
struct amdgpu_mgpu_info mgpu_info = {
|
||||
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
|
||||
};
|
||||
int amdgpu_ras_enable = -1;
|
||||
uint amdgpu_ras_mask = 0xffffffff;
|
||||
|
||||
/**
|
||||
* DOC: vramlimit (int)
|
||||
@@ -494,6 +498,21 @@ module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
|
||||
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
|
||||
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
|
||||
|
||||
/**
|
||||
* DOC: ras_enable (int)
|
||||
* Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
|
||||
*/
|
||||
MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
|
||||
module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
|
||||
|
||||
/**
|
||||
* DOC: ras_mask (uint)
|
||||
* Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
|
||||
* See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
|
||||
*/
|
||||
MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
|
||||
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
|
||||
|
||||
/**
|
||||
* DOC: si_support (int)
|
||||
* Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
|
||||
@@ -974,6 +993,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
|
||||
|
||||
DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
|
||||
drm_dev_unplug(dev);
|
||||
drm_dev_put(dev);
|
||||
pci_disable_device(pdev);
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
}
|
||||
@@ -1158,13 +1178,14 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
|
||||
{
|
||||
struct drm_file *file_priv = f->private_data;
|
||||
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
|
||||
long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
|
||||
|
||||
amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
|
||||
timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
|
||||
timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
|
||||
|
||||
return 0;
|
||||
return timeout >= 0 ? 0 : timeout;
|
||||
}
|
||||
|
||||
|
||||
static const struct file_operations amdgpu_driver_kms_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
|
@@ -49,12 +49,11 @@
static int
amdgpufb_open(struct fb_info *info, int user)
{
	struct amdgpu_fbdev *rfbdev = info->par;
	struct amdgpu_device *adev = rfbdev->adev;
	int ret = pm_runtime_get_sync(adev->ddev->dev);
	struct drm_fb_helper *fb_helper = info->par;
	int ret = pm_runtime_get_sync(fb_helper->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		pm_runtime_mark_last_busy(fb_helper->dev->dev);
		pm_runtime_put_autosuspend(fb_helper->dev->dev);
		return ret;
	}
	return 0;
@@ -63,11 +62,10 @@ amdgpufb_open(struct fb_info *info, int user)
static int
amdgpufb_release(struct fb_info *info, int user)
{
	struct amdgpu_fbdev *rfbdev = info->par;
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_fb_helper *fb_helper = info->par;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
	pm_runtime_mark_last_busy(fb_helper->dev->dev);
	pm_runtime_put_autosuspend(fb_helper->dev->dev);
	return 0;
}

@@ -233,9 +231,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
		goto out;
	}

	info->par = rfbdev;
	info->skip_vt_switch = true;

	ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
					      &mode_cmd, gobj);
	if (ret) {
@@ -248,10 +243,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
	/* setup helper */
	rfbdev->helper.fb = fb;

	strcpy(info->fix.id, "amdgpudrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

	info->fbops = &amdgpufb_ops;

	tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
@@ -260,7 +251,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
	info->screen_base = amdgpu_bo_kptr(abo);
	info->screen_size = amdgpu_bo_size(abo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
	drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;
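/*
 * Editor's illustrative sketch (not part of the patch): the slot lookup above,
 * "seq & ring->fence_drv.num_fences_mask", relies on the fence ring size being
 * a power of two so that the mask (size - 1) wraps sequence numbers onto ring
 * slots.  Stand-alone demonstration with an assumed 8-entry ring:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_fences = 8; /* must be a power of two */
	const uint32_t mask = num_fences - 1;
	uint32_t seq;

	for (seq = 5; seq < 10; seq++) /* shows the wrap from slot 7 back to 0 */
		printf("seq %u -> slot %u\n", seq, seq & mask);
	return 0;
}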
@@ -31,6 +31,7 @@
|
||||
#include <drm/amdgpu_drm.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_display.h"
|
||||
#include "amdgpu_xgmi.h"
|
||||
|
||||
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
@@ -627,11 +628,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
||||
|
||||
switch (args->operation) {
|
||||
case AMDGPU_VA_OP_MAP:
|
||||
r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
|
||||
args->map_size);
|
||||
if (r)
|
||||
goto error_backoff;
|
||||
|
||||
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
|
||||
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
|
||||
args->offset_in_bo, args->map_size,
|
||||
@@ -647,11 +643,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
|
||||
args->map_size);
|
||||
break;
|
||||
case AMDGPU_VA_OP_REPLACE:
|
||||
r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
|
||||
args->map_size);
|
||||
if (r)
|
||||
goto error_backoff;
|
||||
|
||||
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
|
||||
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
|
||||
args->offset_in_bo, args->map_size,
|
||||
@@ -678,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct drm_amdgpu_gem_op *args = data;
|
||||
struct drm_gem_object *gobj;
|
||||
struct amdgpu_vm_bo_base *base;
|
||||
struct amdgpu_bo *robj;
|
||||
int r;
|
||||
|
||||
@@ -716,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
|
||||
amdgpu_bo_unreserve(robj);
|
||||
break;
|
||||
}
|
||||
for (base = robj->vm_bo; base; base = base->next)
|
||||
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
|
||||
amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
|
||||
r = -EINVAL;
|
||||
amdgpu_bo_unreserve(robj);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
|
||||
AMDGPU_GEM_DOMAIN_GTT |
|
||||
AMDGPU_GEM_DOMAIN_CPU);
|
||||
@@ -745,17 +746,25 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;
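/*
 * Editor's illustrative sketch (not part of the patch): what the pitch/size
 * math above works out to for a common 1920x1080, 32 bpp dumb buffer.  The
 * real amdgpu_align_pitch() also applies ASIC-specific pitch alignment, which
 * this sketch omits.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096ULL
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;
	uint32_t cpp = (bpp + 7) / 8;           /* DIV_ROUND_UP(bpp, 8) */
	uint64_t pitch = (uint64_t)width * cpp; /* 7680 bytes per scanline */
	uint64_t size = ALIGN_UP(pitch * height, PAGE_SZ);

	printf("pitch=%llu size=%llu\n",
	       (unsigned long long)pitch, (unsigned long long)size);
	return 0;
}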
@@ -390,7 +390,7 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
@@ -258,6 +258,9 @@ struct amdgpu_gfx {
	/* pipe reservation */
	struct mutex pipe_reserve_mutex;
	DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/*ras */
	struct ras_common_if *ras_if;
};

#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -79,6 +79,33 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
|
||||
return pd_addr;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gmc_set_pte_pde - update the page tables using CPU
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @cpu_pt_addr: cpu address of the page table
|
||||
* @gpu_page_idx: entry in the page table to update
|
||||
* @addr: dst addr to write into pte/pde
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using CPU.
|
||||
*/
|
||||
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags)
|
||||
{
|
||||
void __iomem *ptr = (void *)cpu_pt_addr;
|
||||
uint64_t value;
|
||||
|
||||
/*
|
||||
* The following is for PTE only. GART does not have PDEs.
|
||||
*/
|
||||
value = addr & 0x0000FFFFFFFFF000ULL;
|
||||
value |= flags;
|
||||
writeq(value, ptr + (gpu_page_idx * 8));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_gmc_agp_addr - return the address in the AGP address space
|
||||
*
|
||||
@@ -213,3 +240,58 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
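/*
 * Editor's illustrative sketch (not part of the patch): how the filter above
 * decides whether a logged fault is still "live".  Only entries newer than
 * (timestamp - AMDGPU_GMC_FAULT_TIMEOUT) count, and the max() clamp keeps the
 * subtraction from underflowing while timestamps are still small after boot.
 */
#include <stdint.h>
#include <stdio.h>

#define GMC_FAULT_TIMEOUT 5000ULL /* same value as AMDGPU_GMC_FAULT_TIMEOUT */

static int fault_still_live(uint64_t entry_ts, uint64_t now)
{
	uint64_t clamped = now > GMC_FAULT_TIMEOUT ? now : GMC_FAULT_TIMEOUT + 1;
	uint64_t stamp = clamped - GMC_FAULT_TIMEOUT; /* always >= 1 */

	return entry_ts >= stamp; /* zero-initialized ring entries never match */
}

int main(void)
{
	/* a fault logged at tick 100 is live at tick 4000 but expired by 6000 */
	printf("%d %d\n", fault_still_live(100, 4000), fault_still_live(100, 6000));
	return 0;
}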
@@ -43,8 +43,34 @@
 */
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL

/*
 * Ring size as power of two for the log of recent faults.
 */
#define AMDGPU_GMC_FAULT_RING_ORDER 8
#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)

/*
 * Hash size as power of two for the log of recent faults
 */
#define AMDGPU_GMC_FAULT_HASH_ORDER 8
#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)

/*
 * Number of IH timestamp ticks until a fault is considered handled
 */
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL

struct firmware;

/*
 * GMC page fault information
 */
struct amdgpu_gmc_fault {
	uint64_t timestamp;
	uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
	uint64_t key:52;
};

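/*
 * Editor's illustrative sketch (not part of the patch): the log entry above
 * packs the 52-bit identity key (built as "addr << 4 | pasid" in
 * amdgpu_gmc_filter_faults()) and the ORDER-8 ring index into a single 64-bit
 * word, so an entry typically stays 16 bytes.  Userspace model with
 * hypothetical example values:
 */
#include <stdint.h>
#include <stdio.h>

#define FAULT_RING_ORDER 8

struct gmc_fault {
	uint64_t timestamp;
	uint64_t next : FAULT_RING_ORDER;
	uint64_t key  : 52;
};

int main(void)
{
	uint64_t addr = 0x123450; /* hypothetical page-shifted fault address */
	uint16_t pasid = 0x42;
	struct gmc_fault f = { .timestamp = 1000, .key = (addr << 4) | pasid };

	printf("entry size %zu bytes, key 0x%llx\n",
	       sizeof(f), (unsigned long long)f.key);
	return 0;
}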
/*
|
||||
* VMHUB structures, functions & helpers
|
||||
*/
|
||||
@@ -71,12 +97,6 @@ struct amdgpu_gmc_funcs {
|
||||
/* Change the VMID -> PASID mapping */
|
||||
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
|
||||
unsigned pasid);
|
||||
/* write pte/pde updates using the cpu */
|
||||
int (*set_pte_pde)(struct amdgpu_device *adev,
|
||||
void *cpu_pt_addr, /* cpu addr of page table */
|
||||
uint32_t gpu_page_idx, /* pte/pde to update */
|
||||
uint64_t addr, /* addr to write into pte/pde */
|
||||
uint64_t flags); /* access flags */
|
||||
/* enable/disable PRT support */
|
||||
void (*set_prt)(struct amdgpu_device *adev, bool enable);
|
||||
/* set pte flags based per asic */
|
||||
@@ -147,15 +167,22 @@ struct amdgpu_gmc {
|
||||
struct kfd_vm_fault_info *vm_fault_info;
|
||||
atomic_t vm_fault_info_updated;
|
||||
|
||||
struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
|
||||
struct {
|
||||
uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
|
||||
} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
|
||||
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
|
||||
|
||||
const struct amdgpu_gmc_funcs *gmc_funcs;
|
||||
|
||||
struct amdgpu_xgmi xgmi;
|
||||
struct amdgpu_irq_src ecc_irq;
|
||||
struct ras_common_if *ras_if;
|
||||
};
|
||||
|
||||
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
|
||||
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
|
||||
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
|
||||
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
|
||||
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
|
||||
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
|
||||
|
||||
@@ -189,6 +216,9 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
|
||||
|
||||
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
|
||||
uint64_t *addr, uint64_t *flags);
|
||||
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags);
|
||||
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
|
||||
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
|
||||
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
|
||||
@@ -197,5 +227,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
|
||||
struct amdgpu_gmc *mc);
|
||||
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
|
||||
struct amdgpu_gmc *mc);
|
||||
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
|
||||
uint16_t pasid, uint64_t timestamp);
|
||||
|
||||
#endif
|
||||
|
@@ -36,6 +36,47 @@ struct amdgpu_gtt_node {
|
||||
struct ttm_buffer_object *tbo;
|
||||
};
|
||||
|
||||
/**
|
||||
* DOC: mem_info_gtt_total
|
||||
*
|
||||
* The amdgpu driver provides a sysfs API for reporting current total size of
|
||||
* the GTT.
|
||||
* The file mem_info_gtt_total is used for this, and returns the total size of
|
||||
* the GTT block, in bytes
|
||||
*/
|
||||
static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n",
|
||||
(adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* DOC: mem_info_gtt_used
|
||||
*
|
||||
* The amdgpu driver provides a sysfs API for reporting current total amount of
|
||||
* used GTT.
|
||||
* The file mem_info_gtt_used is used for this, and returns the current used
|
||||
* size of the GTT block, in bytes
|
||||
*/
|
||||
static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n",
|
||||
amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
|
||||
amdgpu_mem_info_gtt_total_show, NULL);
|
||||
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
|
||||
amdgpu_mem_info_gtt_used_show, NULL);
|
||||
|
||||
/**
|
||||
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
|
||||
*
|
||||
@@ -50,6 +91,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
|
||||
struct amdgpu_gtt_mgr *mgr;
|
||||
uint64_t start, size;
|
||||
int ret;
|
||||
|
||||
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
|
||||
if (!mgr)
|
||||
@@ -61,6 +103,18 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
|
||||
spin_lock_init(&mgr->lock);
|
||||
atomic64_set(&mgr->available, p_size);
|
||||
man->priv = mgr;
|
||||
|
||||
ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -74,12 +128,17 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
|
||||
*/
|
||||
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
|
||||
struct amdgpu_gtt_mgr *mgr = man->priv;
|
||||
spin_lock(&mgr->lock);
|
||||
drm_mm_takedown(&mgr->mm);
|
||||
spin_unlock(&mgr->lock);
|
||||
kfree(mgr);
|
||||
man->priv = NULL;
|
||||
|
||||
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
|
||||
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -142,6 +142,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 */
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
	unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
	u32 wptr;

	if (!ih->enabled || adev->shutdown)
@@ -159,7 +160,7 @@ restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	while (ih->rptr != wptr) {
	while (ih->rptr != wptr && --count) {
		amdgpu_irq_dispatch(adev, ih);
		ih->rptr &= ih->ptr_mask;
	}

@@ -24,6 +24,9 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__

/* Maximum number of IVs processed at once */
#define AMDGPU_IH_MAX_NUM_IVS 32

struct amdgpu_device;
struct amdgpu_iv_entry;
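/*
 * Editor's illustrative sketch (not part of the patch): the amdgpu_ih_process()
 * change above caps how many IVs one pass may dispatch (AMDGPU_IH_MAX_NUM_IVS),
 * so a flooding interrupt source cannot keep the handler in the loop forever;
 * the caller re-reads the write pointer and restarts if work remains.
 * Stand-alone model of the same bounded-drain idiom over an assumed
 * 256-entry ring:
 */
#include <stdio.h>

static unsigned int drain_once(unsigned int rptr, unsigned int wptr)
{
	unsigned int count = 32; /* mirrors AMDGPU_IH_MAX_NUM_IVS */

	while (rptr != wptr && --count) {
		/* dispatch_entry(rptr) would run here */
		rptr = (rptr + 1) & 0xff; /* ptr_mask for a 256-entry ring */
	}
	return rptr;
}

int main(void)
{
	printf("stopped at rptr=%u with wptr=100\n", drain_once(0, 100));
	return 0;
}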
@@ -39,6 +39,7 @@
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_gem.h"
|
||||
#include "amdgpu_display.h"
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
|
||||
{
|
||||
@@ -296,6 +297,17 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
|
||||
fw_info->ver = adev->pm.fw_version;
|
||||
fw_info->feature = 0;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_TA:
|
||||
if (query_fw->index > 1)
|
||||
return -EINVAL;
|
||||
if (query_fw->index == 0) {
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
|
||||
} else {
|
||||
fw_info->ver = adev->psp.ta_fw_version;
|
||||
fw_info->feature = adev->psp.ta_ras_ucode_version;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_INFO_FW_SDMA:
|
||||
if (query_fw->index >= adev->sdma.num_instances)
|
||||
return -EINVAL;
|
||||
@@ -684,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
||||
if (adev->pm.dpm_enabled) {
|
||||
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
|
||||
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
|
||||
} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
|
||||
adev->virt.ops->get_pp_clk) {
|
||||
dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
|
||||
dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
|
||||
} else {
|
||||
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
|
||||
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
|
||||
@@ -909,6 +925,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
||||
case AMDGPU_INFO_VRAM_LOST_COUNTER:
|
||||
ui32 = atomic_read(&adev->vram_lost_counter);
|
||||
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
|
||||
case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
uint64_t ras_mask;
|
||||
|
||||
if (!ras)
|
||||
return -EINVAL;
|
||||
ras_mask = (uint64_t)ras->supported << 32 | ras->features;
|
||||
|
||||
return copy_to_user(out, &ras_mask,
|
||||
min_t(u64, size, sizeof(ras_mask))) ?
|
||||
-EFAULT : 0;
|
||||
}
|
||||
default:
|
||||
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
|
||||
return -EINVAL;
|
||||
@@ -1328,6 +1356,16 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
|
||||
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
|
||||
fw_info.feature, fw_info.ver);
|
||||
|
||||
query_fw.fw_type = AMDGPU_INFO_FW_TA;
|
||||
for (i = 0; i < 2; i++) {
|
||||
query_fw.index = i;
|
||||
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
|
||||
if (ret)
|
||||
continue;
|
||||
seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
|
||||
i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
|
||||
}
|
||||
|
||||
/* SMC */
|
||||
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
|
||||
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
|
||||
|
@@ -58,7 +58,7 @@ struct amdgpu_hpd;
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)

#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)

#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
@@ -406,7 +406,7 @@ struct amdgpu_crtc {
	struct amdgpu_flip_work *pflip_works;
	enum amdgpu_flip_status pflip_status;
	int deferred_flip_completion;
	u64 last_flip_vblank;
	u32 last_flip_vblank;
	/* pll sharing */
	struct amdgpu_atom_ss ss;
	bool ss_enabled;
@@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	/* in case amdgpu_device_recover_vram got NULL of bo->parent */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

@@ -72,6 +72,8 @@ struct amdgpu_bo_va {

	/* If the mappings are cleared or filled */
	bool cleared;

	bool is_xgmi;
};

struct amdgpu_bo {
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
@@ -80,6 +81,27 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}

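/*
 * Editor's illustrative sketch (not part of the patch): how the hwmon handlers
 * later in this file call the new wrapper -- one sensor enum in, a value and
 * its size out, with the SW-SMU vs. powerplay split hidden inside.  'adev' is
 * assumed to be the usual struct amdgpu_device pointer already in scope.
 */
	int temp, r;
	u32 size = sizeof(temp);

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp, &size);
	if (!r)
		dev_info(adev->dev, "GPU temperature reading: %d\n", temp);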
/**
|
||||
* DOC: power_dpm_state
|
||||
*
|
||||
@@ -122,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
enum amd_pm_state_type pm;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_current_power_state)
|
||||
if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
|
||||
pm = amdgpu_smu_get_current_power_state(adev);
|
||||
else if (adev->powerplay.pp_funcs->get_current_power_state)
|
||||
pm = amdgpu_dpm_get_current_power_state(adev);
|
||||
else
|
||||
pm = adev->pm.dpm.user_state;
|
||||
@@ -240,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return snprintf(buf, PAGE_SIZE, "off\n");
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_performance_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
level = smu_get_performance_level(&adev->smu);
|
||||
else if (adev->powerplay.pp_funcs->get_performance_level)
|
||||
level = amdgpu_dpm_get_performance_level(adev);
|
||||
else
|
||||
level = adev->pm.dpm.forced_level;
|
||||
@@ -273,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_performance_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
current_level = smu_get_performance_level(&adev->smu);
|
||||
else if (adev->powerplay.pp_funcs->get_performance_level)
|
||||
current_level = amdgpu_dpm_get_performance_level(adev);
|
||||
|
||||
if (strncmp("low", buf, strlen("low")) == 0) {
|
||||
@@ -299,10 +327,35 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
if (amdgim_is_hwperf(adev) &&
|
||||
adev->virt.ops->force_dpm_level) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
adev->virt.ops->force_dpm_level(adev, level);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
return count;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (current_level == level)
|
||||
return count;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_performance_level) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (adev->pm.dpm.thermal_active) {
|
||||
count = -EINVAL;
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
goto fail;
|
||||
}
|
||||
ret = smu_force_performance_level(&adev->smu, level);
|
||||
if (ret)
|
||||
count = -EINVAL;
|
||||
else
|
||||
adev->pm.dpm.forced_level = level;
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
} else if (adev->powerplay.pp_funcs->force_performance_level) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (adev->pm.dpm.thermal_active) {
|
||||
count = -EINVAL;
|
||||
@@ -328,9 +381,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
struct pp_states_info data;
|
||||
int i, buf_len;
|
||||
int i, buf_len, ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_pp_num_states)
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_get_power_num_states(&adev->smu, &data);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else if (adev->powerplay.pp_funcs->get_pp_num_states)
|
||||
amdgpu_dpm_get_pp_num_states(adev, &data);
|
||||
|
||||
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
|
||||
@@ -351,23 +408,29 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
struct pp_states_info data;
|
||||
struct smu_context *smu = &adev->smu;
|
||||
enum amd_pm_state_type pm = 0;
|
||||
int i = 0;
|
||||
int i = 0, ret = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_current_power_state
|
||||
if (is_support_sw_smu(adev)) {
|
||||
pm = smu_get_current_power_state(smu);
|
||||
ret = smu_get_power_num_states(smu, &data);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else if (adev->powerplay.pp_funcs->get_current_power_state
|
||||
&& adev->powerplay.pp_funcs->get_pp_num_states) {
|
||||
pm = amdgpu_dpm_get_current_power_state(adev);
|
||||
amdgpu_dpm_get_pp_num_states(adev, &data);
|
||||
|
||||
for (i = 0; i < data.nums; i++) {
|
||||
if (pm == data.states[i])
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == data.nums)
|
||||
i = -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < data.nums; i++) {
|
||||
if (pm == data.states[i])
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == data.nums)
|
||||
i = -EINVAL;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", i);
|
||||
}
|
||||
|
||||
@@ -397,6 +460,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
|
||||
|
||||
if (strlen(buf) == 1)
|
||||
adev->pp_force_state_enabled = false;
|
||||
else if (is_support_sw_smu(adev))
|
||||
adev->pp_force_state_enabled = false;
|
||||
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
|
||||
adev->powerplay.pp_funcs->get_pp_num_states) {
|
||||
struct pp_states_info data;
|
||||
@@ -442,7 +507,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
|
||||
char *table = NULL;
|
||||
int size;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_pp_table)
|
||||
if (is_support_sw_smu(adev)) {
|
||||
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
|
||||
if (size < 0)
|
||||
return size;
|
||||
}
|
||||
else if (adev->powerplay.pp_funcs->get_pp_table)
|
||||
size = amdgpu_dpm_get_pp_table(adev, &table);
|
||||
else
|
||||
return 0;
|
||||
@@ -462,8 +532,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
int ret = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->set_pp_table)
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else if (adev->powerplay.pp_funcs->set_pp_table)
|
||||
amdgpu_dpm_set_pp_table(adev, buf, count);
|
||||
|
||||
return count;
|
||||
@@ -586,19 +661,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
||||
tmp_str++;
|
||||
}
|
||||
|
||||
if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
|
||||
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_od_edit_dpm_table(&adev->smu, type,
|
||||
parameter, parameter_size);
|
||||
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
|
||||
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
|
||||
parameter, parameter_size);
|
||||
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
if (type == PP_OD_COMMIT_DPM_TABLE) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
|
||||
return count;
|
||||
} else {
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
if (type == PP_OD_COMMIT_DPM_TABLE) {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev,
|
||||
AMD_PP_TASK_READJUST_POWER_STATE,
|
||||
NULL);
|
||||
return count;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -613,7 +698,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
uint32_t size = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
|
||||
size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
|
||||
size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
|
||||
size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
|
||||
return size;
|
||||
} else if (adev->powerplay.pp_funcs->print_clock_levels) {
|
||||
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
|
||||
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
|
||||
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
|
||||
@@ -711,7 +802,13 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
|
||||
adev->virt.ops->get_pp_clk)
|
||||
return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
|
||||
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -767,7 +864,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -783,7 +882,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -803,7 +904,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -819,7 +922,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -839,7 +944,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -855,7 +962,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -875,7 +984,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -891,7 +1002,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -911,7 +1024,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -927,7 +1042,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
|
||||
else if (adev->powerplay.pp_funcs->print_clock_levels)
|
||||
return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
|
||||
else
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -947,7 +1064,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
|
||||
else if (adev->powerplay.pp_funcs->force_clock_level)
|
||||
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
|
||||
|
||||
if (ret)
|
||||
@@ -964,7 +1083,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
uint32_t value = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_sclk_od)
|
||||
if (is_support_sw_smu(adev))
|
||||
value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
|
||||
else if (adev->powerplay.pp_funcs->get_sclk_od)
|
||||
value = amdgpu_dpm_get_sclk_od(adev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", value);
|
||||
@@ -986,14 +1107,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
|
||||
count = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
if (adev->powerplay.pp_funcs->set_sclk_od)
|
||||
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
|
||||
if (is_support_sw_smu(adev)) {
|
||||
value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
amdgpu_pm_compute_clocks(adev);
|
||||
if (adev->powerplay.pp_funcs->set_sclk_od)
|
||||
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
amdgpu_pm_compute_clocks(adev);
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
@@ -1008,7 +1134,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
uint32_t value = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_mclk_od)
|
||||
if (is_support_sw_smu(adev))
|
||||
value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
|
||||
else if (adev->powerplay.pp_funcs->get_mclk_od)
|
||||
value = amdgpu_dpm_get_mclk_od(adev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", value);
|
||||
@@ -1030,14 +1158,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
|
||||
count = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
if (adev->powerplay.pp_funcs->set_mclk_od)
|
||||
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
|
||||
if (is_support_sw_smu(adev)) {
|
||||
value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
amdgpu_pm_compute_clocks(adev);
|
||||
if (adev->powerplay.pp_funcs->set_mclk_od)
|
||||
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
|
||||
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
|
||||
} else {
|
||||
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
|
||||
amdgpu_pm_compute_clocks(adev);
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
@@ -1071,7 +1204,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_power_profile_mode)
|
||||
if (is_support_sw_smu(adev))
|
||||
return smu_get_power_profile_mode(&adev->smu, buf);
|
||||
else if (adev->powerplay.pp_funcs->get_power_profile_mode)
|
||||
return amdgpu_dpm_get_power_profile_mode(adev, buf);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
@@ -1121,9 +1256,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
|
||||
}
|
||||
}
|
||||
parameter[parameter_size] = profile_mode;
|
||||
if (adev->powerplay.pp_funcs->set_power_profile_mode)
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
|
||||
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
|
||||
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
|
||||
|
||||
if (!ret)
|
||||
return count;
|
||||
fail:
|
||||
@@ -1146,14 +1282,10 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
int r, value, size = sizeof(value);
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* read the IP busy sensor */
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
|
||||
(void *)&value, &size);
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -1247,11 +1379,6 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* get the temperature */
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
|
||||
(void *)&temp, &size);
|
||||
@@ -1283,11 +1410,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
|
||||
{
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
u32 pwm_mode = 0;
|
||||
if (is_support_sw_smu(adev)) {
|
||||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||||
} else {
|
||||
if (!adev->powerplay.pp_funcs->get_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->get_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
}
|
||||
|
||||
return sprintf(buf, "%i\n", pwm_mode);
|
||||
}
|
||||
@@ -1306,14 +1436,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
|
||||
return -EINVAL;
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = kstrtoint(buf, 10, &value);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = kstrtoint(buf, 10, &value);
|
||||
if (err)
|
||||
return err;
|
||||
smu_set_fan_control_mode(&adev->smu, value);
|
||||
} else {
|
||||
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
amdgpu_dpm_set_fan_control_mode(adev, value);
|
||||
err = kstrtoint(buf, 10, &value);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
amdgpu_dpm_set_fan_control_mode(adev, value);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
@@ -1345,8 +1483,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
|
||||
if ((adev->flags & AMD_IS_PX) &&
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
if (is_support_sw_smu(adev))
|
||||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||||
else
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
|
||||
pr_info("manual fan speed control should be enabled first\n");
|
||||
return -EINVAL;
|
||||
@@ -1358,7 +1498,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
|
||||
|
||||
value = (value * 100) / 255;
|
||||
|
||||
if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = smu_set_fan_speed_percent(&adev->smu, value);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
|
||||
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1380,7 +1524,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = smu_get_fan_speed_percent(&adev->smu, &speed);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
|
||||
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1404,7 +1552,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = smu_get_current_rpm(&adev->smu, &speed);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
|
||||
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1422,9 +1574,6 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
|
||||
u32 size = sizeof(min_rpm);
|
||||
int r;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->read_sensor)
|
||||
return -EINVAL;
|
||||
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
|
||||
(void *)&min_rpm, &size);
|
||||
if (r)
|
||||
@@ -1442,9 +1591,6 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
|
||||
u32 size = sizeof(max_rpm);
|
||||
int r;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->read_sensor)
|
||||
return -EINVAL;
|
||||
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
|
||||
(void *)&max_rpm, &size);
|
||||
if (r)
|
||||
@@ -1466,7 +1612,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = smu_get_current_rpm(&adev->smu, &rpm);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
|
||||
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1484,7 +1634,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
|
||||
u32 value;
|
||||
u32 pwm_mode;
|
||||
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
if (is_support_sw_smu(adev))
|
||||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||||
else
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
|
||||
if (pwm_mode != AMD_FAN_CTRL_MANUAL)
|
||||
return -ENODATA;
|
||||
|
||||
@@ -1497,7 +1651,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
err = smu_set_fan_speed_rpm(&adev->smu, value);
|
||||
if (err)
|
||||
return err;
|
||||
} else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
|
||||
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1513,11 +1671,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
u32 pwm_mode = 0;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->get_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
if (is_support_sw_smu(adev)) {
|
||||
pwm_mode = smu_get_fan_control_mode(&adev->smu);
|
||||
} else {
|
||||
if (!adev->powerplay.pp_funcs->get_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
|
||||
}
|
||||
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
|
||||
}
|
||||
|
||||
@@ -1536,8 +1697,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
|
||||
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
|
||||
return -EINVAL;
|
||||
|
||||
err = kstrtoint(buf, 10, &value);
|
||||
if (err)
|
||||
@@ -1550,7 +1709,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
|
||||
if (is_support_sw_smu(adev)) {
|
||||
smu_set_fan_control_mode(&adev->smu, pwm_mode);
|
||||
} else {
|
||||
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
|
||||
return -EINVAL;
|
||||
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
@@ -1569,11 +1734,6 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* get the voltage */
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
|
||||
(void *)&vddgfx, &size);
|
||||
@@ -1608,11 +1768,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* get the voltage */
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
|
||||
(void *)&vddnb, &size);
|
||||
@@ -1644,11 +1799,6 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* get the voltage */
|
||||
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
|
||||
(void *)&query, &size);
|
||||
@@ -1675,7 +1825,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
uint32_t limit = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
smu_get_power_limit(&adev->smu, &limit, true);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else {
|
||||
@@ -1690,7 +1843,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
|
||||
struct amdgpu_device *adev = dev_get_drvdata(dev);
|
||||
uint32_t limit = 0;
|
||||
|
||||
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
smu_get_power_limit(&adev->smu, &limit, false);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
|
||||
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
|
||||
} else {
|
||||
@@ -1713,7 +1869,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
|
||||
return err;
|
||||
|
||||
value = value / 1000000; /* convert to Watt */
|
||||
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
adev->smu.funcs->set_power_limit(&adev->smu, value);
|
||||
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
|
||||
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
|
||||
if (err)
|
||||
return err;
|
||||
@@ -1967,18 +2125,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
/* mask fan attributes if we have no bindings for this asic to expose */
|
||||
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
|
||||
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
|
||||
effective_mode &= ~S_IRUGO;
|
||||
if (!is_support_sw_smu(adev)) {
|
||||
/* mask fan attributes if we have no bindings for this asic to expose */
|
||||
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
|
||||
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
|
||||
effective_mode &= ~S_IRUGO;
|
||||
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
|
||||
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
|
||||
effective_mode &= ~S_IWUSR;
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
|
||||
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
|
||||
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
|
||||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
|
||||
effective_mode &= ~S_IWUSR;
|
||||
}
|
||||
|
||||
if ((adev->flags & AMD_IS_APU) &&
|
||||
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
|
||||
@@ -1987,20 +2147,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
|
||||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
/* hide max/min values if we can't both query and manage the fan */
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
|
||||
(!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
|
||||
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
|
||||
return 0;
|
||||
if (!is_support_sw_smu(adev)) {
|
||||
/* hide max/min values if we can't both query and manage the fan */
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
|
||||
(!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
|
||||
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
|
||||
return 0;
|
||||
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
|
||||
(attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
|
||||
return 0;
|
||||
if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
|
||||
!adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
|
||||
(attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
|
||||
attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* only APUs have vddnb */
|
||||
if (!(adev->flags & AMD_IS_APU) &&
|
||||
@@ -2039,9 +2201,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;

if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->read_sensor &&
!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
@@ -2267,7 +2427,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
|
||||
|
||||
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
|
||||
int ret = 0;
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
|
||||
if (ret)
|
||||
DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
|
||||
enable ? "true" : "false", ret);
|
||||
} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
|
||||
/* enable/disable UVD */
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
|
||||
@@ -2288,7 +2454,13 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
|
||||
|
||||
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
|
||||
{
|
||||
if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
|
||||
int ret = 0;
|
||||
if (is_support_sw_smu(adev)) {
|
||||
ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
|
||||
if (ret)
|
||||
DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
|
||||
enable ? "true" : "false", ret);
|
||||
} else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
|
||||
/* enable/disable VCE */
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
|
||||
@@ -2413,7 +2585,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
|
||||
"pp_power_profile_mode\n");
|
||||
return ret;
|
||||
}
|
||||
if (hwmgr->od_enabled) {
|
||||
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
|
||||
(!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
|
||||
ret = device_create_file(adev->dev,
|
||||
&dev_attr_pp_od_clk_voltage);
|
||||
if (ret) {
|
||||
@@ -2489,7 +2662,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
|
||||
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
|
||||
device_remove_file(adev->dev,
|
||||
&dev_attr_pp_power_profile_mode);
|
||||
if (hwmgr->od_enabled)
|
||||
if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
|
||||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
|
||||
device_remove_file(adev->dev,
|
||||
&dev_attr_pp_od_clk_voltage);
|
||||
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
|
||||
@@ -2516,28 +2690,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
|
||||
amdgpu_fence_wait_empty(ring);
|
||||
}
|
||||
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
if (!amdgpu_device_has_dc_support(adev)) {
|
||||
if (is_support_sw_smu(adev)) {
|
||||
struct smu_context *smu = &adev->smu;
|
||||
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
|
||||
mutex_lock(&(smu->mutex));
|
||||
smu_handle_task(&adev->smu,
|
||||
smu_dpm->dpm_level,
|
||||
AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
|
||||
mutex_unlock(&(smu->mutex));
|
||||
} else {
|
||||
if (adev->powerplay.pp_funcs->dispatch_tasks) {
|
||||
if (!amdgpu_device_has_dc_support(adev)) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_dpm_get_active_displays(adev);
|
||||
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
|
||||
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
|
||||
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
|
||||
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
|
||||
if (adev->pm.pm_display_cfg.vrefresh > 120)
|
||||
adev->pm.pm_display_cfg.min_vblank_time = 0;
|
||||
if (adev->powerplay.pp_funcs->display_configuration_change)
|
||||
adev->powerplay.pp_funcs->display_configuration_change(
|
||||
adev->powerplay.pp_handle,
|
||||
&adev->pm.pm_display_cfg);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
}
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
|
||||
} else {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_dpm_get_active_displays(adev);
|
||||
adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
|
||||
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
|
||||
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
|
||||
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
|
||||
if (adev->pm.pm_display_cfg.vrefresh > 120)
|
||||
adev->pm.pm_display_cfg.min_vblank_time = 0;
|
||||
if (adev->powerplay.pp_funcs->display_configuration_change)
|
||||
adev->powerplay.pp_funcs->display_configuration_change(
|
||||
adev->powerplay.pp_handle,
|
||||
&adev->pm.pm_display_cfg);
|
||||
amdgpu_dpm_change_power_state_locked(adev);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
}
|
||||
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
|
||||
} else {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
amdgpu_dpm_get_active_displays(adev);
|
||||
amdgpu_dpm_change_power_state_locked(adev);
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2553,11 +2737,6 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
|
||||
uint32_t query = 0;
|
||||
int size;
|
||||
|
||||
/* sanity check PP is enabled */
|
||||
if (!(adev->powerplay.pp_funcs &&
|
||||
adev->powerplay.pp_funcs->read_sensor))
|
||||
return -EINVAL;
|
||||
|
||||
/* GPU Clocks */
|
||||
size = sizeof(value);
|
||||
seq_printf(m, "GFX Clocks and Power:\n");
|
||||
@@ -2649,7 +2828,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
|
||||
if ((adev->flags & AMD_IS_PX) &&
|
||||
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
|
||||
seq_printf(m, "PX asic powered off\n");
|
||||
} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
|
||||
} else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
|
||||
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
|
||||
|
@@ -120,6 +120,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
|
||||
{
|
||||
int ret;
|
||||
int index;
|
||||
int timeout = 2000;
|
||||
|
||||
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
|
||||
|
||||
@@ -133,8 +134,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}

while (*((unsigned int *)psp->fence_buf) != index)
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
msleep(1);
}

/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
@@ -143,12 +147,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
if (psp->cmd_buf_mem->resp.status) {
if (psp->cmd_buf_mem->resp.status || !timeout) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command failed and response status is (%d)\n",
psp->cmd_buf_mem->resp.status);
if (!timeout)
return -EINVAL;
}

/* get xGMI session id from response buffer */
|
||||
@@ -181,13 +187,13 @@ static int psp_tmr_init(struct psp_context *psp)
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 3M memory aligned to 1M from Frame Buffer (local
|
||||
* physical).
|
||||
* According to HW engineer, they prefer the TMR address be "naturally
|
||||
* aligned" , e.g. the start address be an integer divide of TMR size.
|
||||
*
|
||||
* Note: this memory need be reserved till the driver
|
||||
* uninitializes.
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
|
||||
|
||||
@@ -466,6 +472,206 @@ static int psp_xgmi_initialize(struct psp_context *psp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
// ras begin
|
||||
static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
|
||||
uint64_t ras_ta_mc, uint64_t ras_mc_shared,
|
||||
uint32_t ras_ta_size, uint32_t shared_size)
|
||||
{
|
||||
cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
|
||||
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
|
||||
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
|
||||
cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
|
||||
|
||||
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
|
||||
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
|
||||
cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
|
||||
}
|
||||
|
||||
static int psp_ras_init_shared_buf(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Allocate 16k memory aligned to 4k from Frame Buffer (local
|
||||
* physical) for ras ta <-> Driver
|
||||
*/
|
||||
ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
|
||||
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->ras.ras_shared_bo,
|
||||
&psp->ras.ras_shared_mc_addr,
|
||||
&psp->ras.ras_shared_buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int psp_ras_load(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
struct psp_gfx_cmd_resp *cmd;
|
||||
|
||||
/*
|
||||
* TODO: bypass the loading in sriov for now
|
||||
*/
|
||||
if (amdgpu_sriov_vf(psp->adev))
|
||||
return 0;
|
||||
|
||||
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
|
||||
memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
|
||||
|
||||
psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
|
||||
psp->ras.ras_shared_mc_addr,
|
||||
psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
|
||||
|
||||
ret = psp_cmd_submit_buf(psp, NULL, cmd,
|
||||
psp->fence_buf_mc_addr);
|
||||
|
||||
if (!ret) {
|
||||
psp->ras.ras_initialized = 1;
|
||||
psp->ras.session_id = cmd->resp.session_id;
|
||||
}
|
||||
|
||||
kfree(cmd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
|
||||
uint32_t ras_session_id)
|
||||
{
|
||||
cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
|
||||
cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
|
||||
}
|
||||
|
||||
static int psp_ras_unload(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
struct psp_gfx_cmd_resp *cmd;
|
||||
|
||||
/*
|
||||
* TODO: bypass the unloading in sriov for now
|
||||
*/
|
||||
if (amdgpu_sriov_vf(psp->adev))
|
||||
return 0;
|
||||
|
||||
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
|
||||
|
||||
ret = psp_cmd_submit_buf(psp, NULL, cmd,
|
||||
psp->fence_buf_mc_addr);
|
||||
|
||||
kfree(cmd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
|
||||
uint32_t ta_cmd_id,
|
||||
uint32_t ras_session_id)
|
||||
{
|
||||
cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
|
||||
cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
|
||||
cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
|
||||
/* Note: cmd_invoke_cmd.buf is not used for now */
|
||||
}
|
||||
|
||||
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
|
||||
{
|
||||
int ret;
|
||||
struct psp_gfx_cmd_resp *cmd;
|
||||
|
||||
/*
|
||||
* TODO: bypass the loading in sriov for now
|
||||
*/
|
||||
if (amdgpu_sriov_vf(psp->adev))
|
||||
return 0;
|
||||
|
||||
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
|
||||
psp->ras.session_id);
|
||||
|
||||
ret = psp_cmd_submit_buf(psp, NULL, cmd,
|
||||
psp->fence_buf_mc_addr);
|
||||
|
||||
kfree(cmd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int psp_ras_enable_features(struct psp_context *psp,
|
||||
union ta_ras_cmd_input *info, bool enable)
|
||||
{
|
||||
struct ta_ras_shared_memory *ras_cmd;
|
||||
int ret;
|
||||
|
||||
if (!psp->ras.ras_initialized)
|
||||
return -EINVAL;
|
||||
|
||||
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
|
||||
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
|
||||
|
||||
if (enable)
|
||||
ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
|
||||
else
|
||||
ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
|
||||
|
||||
ras_cmd->ras_in_message = *info;
|
||||
|
||||
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
return ras_cmd->ras_status;
|
||||
}
|
||||
|
||||
static int psp_ras_terminate(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!psp->ras.ras_initialized)
|
||||
return 0;
|
||||
|
||||
ret = psp_ras_unload(psp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
psp->ras.ras_initialized = 0;
|
||||
|
||||
/* free ras shared memory */
|
||||
amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
|
||||
&psp->ras.ras_shared_mc_addr,
|
||||
&psp->ras.ras_shared_buf);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int psp_ras_initialize(struct psp_context *psp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!psp->ras.ras_initialized) {
|
||||
ret = psp_ras_init_shared_buf(psp);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = psp_ras_load(psp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
// ras end
|
||||
|
||||
static int psp_hw_start(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
@@ -473,25 +679,35 @@ static int psp_hw_start(struct psp_context *psp)
|
||||
|
||||
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
|
||||
ret = psp_bootloader_load_sysdrv(psp);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sysdrv failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = psp_bootloader_load_sos(psp);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load sos failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP create ring failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = psp_tmr_load(psp);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load tmr failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = psp_asd_load(psp);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP load asd failed!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (adev->gmc.xgmi.num_physical_nodes > 1) {
|
||||
ret = psp_xgmi_initialize(psp);
|
||||
@@ -502,6 +718,15 @@ static int psp_hw_start(struct psp_context *psp)
|
||||
dev_err(psp->adev->dev,
|
||||
"XGMI: Failed to initialize XGMI session\n");
|
||||
}
|
||||
|
||||
|
||||
if (psp->adev->psp.ta_fw) {
|
||||
ret = psp_ras_initialize(psp);
|
||||
if (ret)
|
||||
dev_err(psp->adev->dev,
|
||||
"RAS: Failed to initialize RAS\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -665,53 +890,52 @@ static int psp_load_fw(struct amdgpu_device *adev)
|
||||
&psp->fence_buf_mc_addr,
|
||||
&psp->fence_buf);
|
||||
if (ret)
|
||||
goto failed_mem2;
|
||||
goto failed;
|
||||
|
||||
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
|
||||
(void **)&psp->cmd_buf_mem);
|
||||
if (ret)
|
||||
goto failed_mem1;
|
||||
goto failed;
|
||||
|
||||
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
|
||||
|
||||
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
|
||||
if (ret)
|
||||
goto failed_mem;
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP ring init failed!\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
ret = psp_tmr_init(psp);
|
||||
if (ret)
|
||||
goto failed_mem;
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP tmr init failed!\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
ret = psp_asd_init(psp);
|
||||
if (ret)
|
||||
goto failed_mem;
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP asd init failed!\n");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
skip_memalloc:
|
||||
ret = psp_hw_start(psp);
|
||||
if (ret)
|
||||
goto failed_mem;
|
||||
goto failed;
|
||||
|
||||
ret = psp_np_fw_load(psp);
|
||||
if (ret)
|
||||
goto failed_mem;
|
||||
goto failed;
|
||||
|
||||
return 0;
|
||||
|
||||
failed_mem:
|
||||
amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
|
||||
&psp->cmd_buf_mc_addr,
|
||||
(void **)&psp->cmd_buf_mem);
|
||||
failed_mem1:
|
||||
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
|
||||
&psp->fence_buf_mc_addr, &psp->fence_buf);
|
||||
failed_mem2:
|
||||
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
|
||||
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
|
||||
failed:
kfree(psp->cmd);
psp->cmd = NULL;
/*
* all cleanup jobs (xgmi terminate, ras terminate,
* ring destroy, cmd/fence/fw buffers destroy,
* psp->cmd destroy) are delayed to psp_hw_fini
*/
return ret;
}

@@ -753,6 +977,9 @@ static int psp_hw_fini(void *handle)
|
||||
psp->xgmi_context.initialized == 1)
|
||||
psp_xgmi_terminate(psp);
|
||||
|
||||
if (psp->adev->psp.ta_fw)
|
||||
psp_ras_terminate(psp);
|
||||
|
||||
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
|
||||
|
||||
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
|
||||
@@ -786,6 +1013,14 @@ static int psp_suspend(void *handle)
|
||||
}
|
||||
}
|
||||
|
||||
if (psp->adev->psp.ta_fw) {
|
||||
ret = psp_ras_terminate(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to terminate ras ta\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
|
||||
if (ret) {
|
||||
DRM_ERROR("PSP ring stop failed\n");
|
||||
|
@@ -28,11 +28,13 @@
|
||||
#include "amdgpu.h"
|
||||
#include "psp_gfx_if.h"
|
||||
#include "ta_xgmi_if.h"
|
||||
#include "ta_ras_if.h"
|
||||
|
||||
#define PSP_FENCE_BUFFER_SIZE 0x1000
|
||||
#define PSP_CMD_BUFFER_SIZE 0x1000
|
||||
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
|
||||
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
|
||||
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
|
||||
#define PSP_1_MEG 0x100000
|
||||
#define PSP_TMR_SIZE 0x400000
|
||||
|
||||
@@ -88,6 +90,9 @@ struct psp_funcs
|
||||
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
|
||||
struct psp_xgmi_topology_info *topology);
|
||||
bool (*support_vmr_ring)(struct psp_context *psp);
|
||||
int (*ras_trigger_error)(struct psp_context *psp,
|
||||
struct ta_ras_trigger_error_input *info);
|
||||
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
|
||||
};
|
||||
|
||||
struct psp_xgmi_context {
|
||||
@@ -98,6 +103,16 @@ struct psp_xgmi_context {
|
||||
void *xgmi_shared_buf;
|
||||
};
|
||||
|
||||
struct psp_ras_context {
|
||||
/*ras fw*/
|
||||
bool ras_initialized;
|
||||
uint32_t session_id;
|
||||
struct amdgpu_bo *ras_shared_bo;
|
||||
uint64_t ras_shared_mc_addr;
|
||||
void *ras_shared_buf;
|
||||
struct amdgpu_ras *ras;
|
||||
};
|
||||
|
||||
struct psp_context
|
||||
{
|
||||
struct amdgpu_device *adev;
|
||||
@@ -150,10 +165,15 @@ struct psp_context
|
||||
|
||||
/* xgmi ta firmware and buffer */
|
||||
const struct firmware *ta_fw;
|
||||
uint32_t ta_fw_version;
|
||||
uint32_t ta_xgmi_ucode_version;
|
||||
uint32_t ta_xgmi_ucode_size;
|
||||
uint8_t *ta_xgmi_start_addr;
|
||||
uint32_t ta_ras_ucode_version;
|
||||
uint32_t ta_ras_ucode_size;
|
||||
uint8_t *ta_ras_start_addr;
|
||||
struct psp_xgmi_context xgmi_context;
|
||||
struct psp_ras_context ras;
|
||||
};
|
||||
|
||||
struct amdgpu_psp_funcs {
|
||||
@@ -207,6 +227,13 @@ struct psp_xgmi_topology_info {
|
||||
|
||||
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
|
||||
|
||||
#define psp_ras_trigger_error(psp, info) \
|
||||
((psp)->funcs->ras_trigger_error ? \
|
||||
(psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
|
||||
#define psp_ras_cure_posion(psp, addr) \
|
||||
((psp)->funcs->ras_cure_posion ? \
|
||||
(psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
|
||||
|
||||
extern const struct amd_ip_funcs psp_ip_funcs;
|
||||
|
||||
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
|
||||
@@ -217,6 +244,11 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
|
||||
|
||||
int psp_gpu_reset(struct amdgpu_device *adev);
|
||||
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
|
||||
|
||||
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
|
||||
int psp_ras_enable_features(struct psp_context *psp,
|
||||
union ta_ras_cmd_input *info, bool enable);
|
||||
|
||||
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
|
||||
|
||||
#endif
|
||||
|
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c (new file, 1482 lines; diff suppressed because it is too large)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h (new file, 294 lines)
@@ -0,0 +1,294 @@
|
||||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*
|
||||
*/
|
||||
#ifndef _AMDGPU_RAS_H
|
||||
#define _AMDGPU_RAS_H
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/list.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_psp.h"
|
||||
#include "ta_ras_if.h"
|
||||
|
||||
enum amdgpu_ras_block {
|
||||
AMDGPU_RAS_BLOCK__UMC = 0,
|
||||
AMDGPU_RAS_BLOCK__SDMA,
|
||||
AMDGPU_RAS_BLOCK__GFX,
|
||||
AMDGPU_RAS_BLOCK__MMHUB,
|
||||
AMDGPU_RAS_BLOCK__ATHUB,
|
||||
AMDGPU_RAS_BLOCK__PCIE_BIF,
|
||||
AMDGPU_RAS_BLOCK__HDP,
|
||||
AMDGPU_RAS_BLOCK__XGMI_WAFL,
|
||||
AMDGPU_RAS_BLOCK__DF,
|
||||
AMDGPU_RAS_BLOCK__SMN,
|
||||
AMDGPU_RAS_BLOCK__SEM,
|
||||
AMDGPU_RAS_BLOCK__MP0,
|
||||
AMDGPU_RAS_BLOCK__MP1,
|
||||
AMDGPU_RAS_BLOCK__FUSE,
|
||||
|
||||
AMDGPU_RAS_BLOCK__LAST
|
||||
};
|
||||
|
||||
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
|
||||
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
|
||||
|
||||
enum amdgpu_ras_error_type {
|
||||
AMDGPU_RAS_ERROR__NONE = 0,
|
||||
AMDGPU_RAS_ERROR__PARITY = 1,
|
||||
AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
|
||||
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
|
||||
AMDGPU_RAS_ERROR__POISON = 8,
|
||||
};
|
||||
|
||||
enum amdgpu_ras_ret {
|
||||
AMDGPU_RAS_SUCCESS = 0,
|
||||
AMDGPU_RAS_FAIL,
|
||||
AMDGPU_RAS_UE,
|
||||
AMDGPU_RAS_CE,
|
||||
AMDGPU_RAS_PT,
|
||||
};
|
||||
|
||||
struct ras_common_if {
|
||||
enum amdgpu_ras_block block;
|
||||
enum amdgpu_ras_error_type type;
|
||||
uint32_t sub_block_index;
|
||||
/* block name */
|
||||
char name[32];
|
||||
};
|
||||
|
||||
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry);
|
||||
|
||||
struct amdgpu_ras {
|
||||
/* ras infrastructure */
|
||||
/* for ras itself. */
|
||||
uint32_t hw_supported;
|
||||
/* for IP to check its ras ability. */
|
||||
uint32_t supported;
|
||||
uint32_t features;
|
||||
struct list_head head;
|
||||
/* debugfs */
|
||||
struct dentry *dir;
|
||||
/* debugfs ctrl */
|
||||
struct dentry *ent;
|
||||
/* sysfs */
|
||||
struct device_attribute features_attr;
|
||||
/* block array */
|
||||
struct ras_manager *objs;
|
||||
|
||||
/* gpu recovery */
|
||||
struct work_struct recovery_work;
|
||||
atomic_t in_recovery;
|
||||
struct amdgpu_device *adev;
|
||||
/* error handler data */
|
||||
struct ras_err_handler_data *eh_data;
|
||||
struct mutex recovery_lock;
|
||||
|
||||
uint32_t flags;
|
||||
};
|
||||
|
||||
/* interfaces for IP */
|
||||
|
||||
struct ras_fs_if {
|
||||
struct ras_common_if head;
|
||||
char sysfs_name[32];
|
||||
char debugfs_name[32];
|
||||
};
|
||||
|
||||
struct ras_query_if {
|
||||
struct ras_common_if head;
|
||||
unsigned long ue_count;
|
||||
unsigned long ce_count;
|
||||
};
|
||||
|
||||
struct ras_inject_if {
|
||||
struct ras_common_if head;
|
||||
uint64_t address;
|
||||
uint64_t value;
|
||||
};
|
||||
|
||||
struct ras_cure_if {
|
||||
struct ras_common_if head;
|
||||
uint64_t address;
|
||||
};
|
||||
|
||||
struct ras_ih_if {
|
||||
struct ras_common_if head;
|
||||
ras_ih_cb cb;
|
||||
};
|
||||
|
||||
struct ras_dispatch_if {
|
||||
struct ras_common_if head;
|
||||
struct amdgpu_iv_entry *entry;
|
||||
};
|
||||
|
||||
struct ras_debug_if {
|
||||
union {
|
||||
struct ras_common_if head;
|
||||
struct ras_inject_if inject;
|
||||
};
|
||||
int op;
|
||||
};
|
||||
/* work flow
* vbios
* 1: ras feature enable (enabled by default)
* psp
* 2: ras framework init (in ip_init)
* IP
* 3: IH add
* 4: debugfs/sysfs create
* 5: query/inject
* 6: debugfs/sysfs remove
* 7: IH remove
* 8: feature disable
*/

#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
|
||||
#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
|
||||
|
||||
/* check if ras is supported on block, say, sdma, gfx */
|
||||
static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
|
||||
unsigned int block)
|
||||
{
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
return ras && (ras->supported & (1 << block));
|
||||
}
|
||||
|
||||
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
|
||||
bool is_ce);
|
||||
|
||||
/* error handling functions */
|
||||
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
|
||||
unsigned long *bps, int pages);
|
||||
|
||||
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
|
||||
|
||||
static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
|
||||
bool is_baco)
|
||||
{
|
||||
/* remove me when gpu reset works on vega20 A1. */
|
||||
#if 0
|
||||
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
|
||||
|
||||
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
|
||||
schedule_work(&ras->recovery_work);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline enum ta_ras_block
|
||||
amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
|
||||
switch (block) {
|
||||
case AMDGPU_RAS_BLOCK__UMC:
|
||||
return TA_RAS_BLOCK__UMC;
|
||||
case AMDGPU_RAS_BLOCK__SDMA:
|
||||
return TA_RAS_BLOCK__SDMA;
|
||||
case AMDGPU_RAS_BLOCK__GFX:
|
||||
return TA_RAS_BLOCK__GFX;
|
||||
case AMDGPU_RAS_BLOCK__MMHUB:
|
||||
return TA_RAS_BLOCK__MMHUB;
|
||||
case AMDGPU_RAS_BLOCK__ATHUB:
|
||||
return TA_RAS_BLOCK__ATHUB;
|
||||
case AMDGPU_RAS_BLOCK__PCIE_BIF:
|
||||
return TA_RAS_BLOCK__PCIE_BIF;
|
||||
case AMDGPU_RAS_BLOCK__HDP:
|
||||
return TA_RAS_BLOCK__HDP;
|
||||
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
|
||||
return TA_RAS_BLOCK__XGMI_WAFL;
|
||||
case AMDGPU_RAS_BLOCK__DF:
|
||||
return TA_RAS_BLOCK__DF;
|
||||
case AMDGPU_RAS_BLOCK__SMN:
|
||||
return TA_RAS_BLOCK__SMN;
|
||||
case AMDGPU_RAS_BLOCK__SEM:
|
||||
return TA_RAS_BLOCK__SEM;
|
||||
case AMDGPU_RAS_BLOCK__MP0:
|
||||
return TA_RAS_BLOCK__MP0;
|
||||
case AMDGPU_RAS_BLOCK__MP1:
|
||||
return TA_RAS_BLOCK__MP1;
|
||||
case AMDGPU_RAS_BLOCK__FUSE:
|
||||
return TA_RAS_BLOCK__FUSE;
|
||||
default:
|
||||
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
|
||||
return TA_RAS_BLOCK__UMC;
|
||||
}
|
||||
}
|
||||
|
||||
static inline enum ta_ras_error_type
|
||||
amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
|
||||
switch (error) {
|
||||
case AMDGPU_RAS_ERROR__NONE:
|
||||
return TA_RAS_ERROR__NONE;
|
||||
case AMDGPU_RAS_ERROR__PARITY:
|
||||
return TA_RAS_ERROR__PARITY;
|
||||
case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
|
||||
return TA_RAS_ERROR__SINGLE_CORRECTABLE;
|
||||
case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
|
||||
return TA_RAS_ERROR__MULTI_UNCORRECTABLE;
|
||||
case AMDGPU_RAS_ERROR__POISON:
|
||||
return TA_RAS_ERROR__POISON;
|
||||
default:
|
||||
WARN_ONCE(1, "RAS ERROR: unexpected error type %d\n", error);
|
||||
return TA_RAS_ERROR__NONE;
|
||||
}
|
||||
}
|
||||
|
||||
/* called in ip_init and ip_fini */
|
||||
int amdgpu_ras_init(struct amdgpu_device *adev);
|
||||
void amdgpu_ras_post_init(struct amdgpu_device *adev);
|
||||
int amdgpu_ras_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head, bool enable);
|
||||
|
||||
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head, bool enable);
|
||||
|
||||
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
|
||||
struct ras_fs_if *head);
|
||||
|
||||
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head);
|
||||
|
||||
int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
|
||||
struct ras_fs_if *head);
|
||||
|
||||
int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
|
||||
struct ras_common_if *head);
|
||||
|
||||
int amdgpu_ras_error_query(struct amdgpu_device *adev,
|
||||
struct ras_query_if *info);
|
||||
|
||||
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
|
||||
struct ras_inject_if *info);
|
||||
|
||||
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
|
||||
struct ras_ih_if *info);
|
||||
|
||||
int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
|
||||
struct ras_ih_if *info);
|
||||
|
||||
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
|
||||
struct ras_dispatch_if *info);
|
||||
#endif
|
@@ -248,6 +248,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
|
||||
*/
|
||||
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
|
||||
sched_hw_submission = max(sched_hw_submission, 256);
|
||||
else if (ring == &adev->sdma.instance[0].page)
|
||||
sched_hw_submission = 256;
|
||||
|
||||
if (ring->adev == NULL) {
|
||||
if (adev->num_rings >= AMDGPU_MAX_RINGS)
|
||||
|
@@ -28,9 +28,8 @@
#define AMDGPU_MAX_SDMA_INSTANCES 2

enum amdgpu_sdma_irq {
AMDGPU_SDMA_IRQ_TRAP0 = 0,
AMDGPU_SDMA_IRQ_TRAP1,

AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_SDMA_IRQ_LAST
};

@@ -49,9 +48,11 @@ struct amdgpu_sdma {
|
||||
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
|
||||
struct amdgpu_irq_src trap_irq;
|
||||
struct amdgpu_irq_src illegal_inst_irq;
|
||||
struct amdgpu_irq_src ecc_irq;
|
||||
int num_instances;
|
||||
uint32_t srbm_soft_reset;
|
||||
bool has_page_queue;
|
||||
struct ras_common_if *ras_if;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@@ -50,8 +50,6 @@
|
||||
#include "amdgpu_sdma.h"
|
||||
#include "bif/bif_4_1_d.h"
|
||||
|
||||
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
|
||||
|
||||
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *mem, unsigned num_pages,
|
||||
uint64_t offset, unsigned window,
|
||||
@@ -1424,6 +1422,13 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
|
||||
struct dma_fence *f;
|
||||
int i;
|
||||
|
||||
/* Don't evict VM page tables while they are busy, otherwise we can't
|
||||
* cleanly handle page faults.
|
||||
*/
|
||||
if (bo->type == ttm_bo_type_kernel &&
|
||||
!reservation_object_test_signaled_rcu(bo->resv, true))
|
||||
return false;
|
||||
|
||||
/* If bo is a KFD BO, check if the bo belongs to the current process.
|
||||
* If true, then return false as any KFD process needs all its BOs to
|
||||
* be resident to run successfully
|
||||
@@ -1671,7 +1676,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
||||
r = ttm_bo_device_init(&adev->mman.bdev,
|
||||
&amdgpu_bo_driver,
|
||||
adev->ddev->anon_inode->i_mapping,
|
||||
DRM_FILE_PAGE_OFFSET,
|
||||
adev->need_dma32);
|
||||
if (r) {
|
||||
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
|
||||
@@ -1877,14 +1881,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
struct amdgpu_device *adev;
struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return -EINVAL;

file_priv = filp->private_data;
adev = file_priv->minor->dev->dev_private;
if (adev == NULL)
return -EINVAL;

@@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
|
||||
/* enable virtual display */
|
||||
adev->mode_info.num_crtc = 1;
|
||||
adev->enable_virtual_display = true;
|
||||
adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
|
||||
adev->cg_flags = 0;
|
||||
adev->pg_flags = 0;
|
||||
}
|
||||
@@ -375,4 +376,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t parse_clk(char *buf, bool min)
{
char *ptr = buf;
uint32_t clk = 0;

do {
ptr = strchr(ptr, ':');
if (!ptr)
break;
ptr+=2;
clk = simple_strtoul(ptr, NULL, 10);
} while (!min);

return clk * 100;
}

uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
|
||||
{
|
||||
char *buf = NULL;
|
||||
uint32_t clk = 0;
|
||||
|
||||
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
|
||||
clk = parse_clk(buf, lowest);
|
||||
|
||||
kfree(buf);
|
||||
|
||||
return clk;
|
||||
}
|
||||
|
||||
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
|
||||
{
|
||||
char *buf = NULL;
|
||||
uint32_t clk = 0;
|
||||
|
||||
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
|
||||
clk = parse_clk(buf, lowest);
|
||||
|
||||
kfree(buf);
|
||||
|
||||
return clk;
|
||||
}
|
||||
|
||||
|
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
|
||||
int (*reset_gpu)(struct amdgpu_device *adev);
|
||||
int (*wait_reset)(struct amdgpu_device *adev);
|
||||
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
|
||||
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
|
||||
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
|
||||
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
|
||||
/* VRAM LOST by GIM */
|
||||
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
|
||||
/* HW PERF SIM in GIM */
|
||||
AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
|
||||
};
|
||||
|
||||
struct amd_sriov_msg_pf2vf_info_header {
|
||||
@@ -252,6 +256,8 @@ struct amdgpu_virt {
|
||||
struct amdgpu_vf_error_buffer vf_errors;
|
||||
struct amdgpu_virt_fw_reserve fw_reserve;
|
||||
uint32_t gim_feature;
|
||||
/* protect DPM events to GIM */
|
||||
struct mutex dpm_mutex;
|
||||
};
|
||||
|
||||
#define amdgpu_sriov_enabled(adev) \
|
||||
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
#define amdgim_is_hwperf(adev) \
|
||||
((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
|
||||
|
||||
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
|
||||
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
|
||||
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
|
||||
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
|
||||
unsigned int key,
|
||||
unsigned int chksum);
|
||||
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
|
||||
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
|
||||
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
|
||||
|
||||
#endif

File diff suppressed because it is too large
@@ -30,7 +30,6 @@
|
||||
#include <drm/gpu_scheduler.h>
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <linux/chash.h>
|
||||
|
||||
#include "amdgpu_sync.h"
|
||||
#include "amdgpu_ring.h"
|
||||
@@ -140,7 +139,6 @@ struct amdgpu_vm_bo_base {
|
||||
|
||||
struct amdgpu_vm_pt {
|
||||
struct amdgpu_vm_bo_base base;
|
||||
bool huge;
|
||||
|
||||
/* array of page tables, one for each directory entry */
|
||||
struct amdgpu_vm_pt *entries;
|
||||
@@ -167,11 +165,6 @@ struct amdgpu_vm_pte_funcs {
|
||||
uint32_t incr, uint64_t flags);
|
||||
};
|
||||
|
||||
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
|
||||
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
|
||||
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
|
||||
|
||||
|
||||
struct amdgpu_task_info {
|
||||
char process_name[TASK_COMM_LEN];
|
||||
char task_name[TASK_COMM_LEN];
|
||||
@@ -179,11 +172,52 @@ struct amdgpu_task_info {
|
||||
pid_t tgid;
|
||||
};
|
||||
|
||||
#define AMDGPU_PAGEFAULT_HASH_BITS 8
|
||||
struct amdgpu_retryfault_hashtable {
|
||||
DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
|
||||
spinlock_t lock;
|
||||
int count;
|
||||
/**
|
||||
* struct amdgpu_vm_update_params
|
||||
*
|
||||
* Encapsulate some VM table update parameters to reduce
|
||||
* the number of function parameters
|
||||
*
|
||||
*/
|
||||
struct amdgpu_vm_update_params {
|
||||
|
||||
/**
|
||||
* @adev: amdgpu device we do this update for
|
||||
*/
|
||||
struct amdgpu_device *adev;
|
||||
|
||||
/**
|
||||
* @vm: optional amdgpu_vm we do this update for
|
||||
*/
|
||||
struct amdgpu_vm *vm;
|
||||
|
||||
/**
|
||||
* @pages_addr:
|
||||
*
|
||||
* DMA addresses to use for mapping
|
||||
*/
|
||||
dma_addr_t *pages_addr;
|
||||
|
||||
/**
|
||||
* @job: job to used for hw submission
|
||||
*/
|
||||
struct amdgpu_job *job;
|
||||
|
||||
/**
|
||||
* @num_dw_left: number of dw left for the IB
|
||||
*/
|
||||
unsigned int num_dw_left;
|
||||
};
|
||||
|
||||
struct amdgpu_vm_update_funcs {
|
||||
int (*map_table)(struct amdgpu_bo *bo);
|
||||
int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
|
||||
struct dma_fence *exclusive);
|
||||
int (*update)(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
|
||||
unsigned count, uint32_t incr, uint64_t flags);
|
||||
int (*commit)(struct amdgpu_vm_update_params *p,
|
||||
struct dma_fence **fence);
|
||||
};
|
||||
|
||||
struct amdgpu_vm {
|
||||
@@ -221,7 +255,10 @@ struct amdgpu_vm {
|
||||
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
|
||||
|
||||
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
|
||||
bool use_cpu_for_update;
|
||||
bool use_cpu_for_update;
|
||||
|
||||
/* Functions to use for VM table updates */
|
||||
const struct amdgpu_vm_update_funcs *update_funcs;
|
||||
|
||||
/* Flag to indicate ATS support from PTE for GFX9 */
|
||||
bool pte_support_ats;
|
||||
@@ -245,7 +282,6 @@ struct amdgpu_vm {
|
||||
struct ttm_lru_bulk_move lru_bulk_move;
|
||||
/* mark whether can do the bulk move */
|
||||
bool bulk_moveable;
|
||||
struct amdgpu_retryfault_hashtable *fault_hash;
|
||||
};
|
||||
|
||||
struct amdgpu_vm_manager {
|
||||
@@ -267,6 +303,7 @@ struct amdgpu_vm_manager {
|
||||
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
|
||||
struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
|
||||
unsigned vm_pte_num_rqs;
|
||||
struct amdgpu_ring *page_fault;
|
||||
|
||||
/* partial resident texture handling */
|
||||
spinlock_t prt_lock;
|
||||
@@ -283,14 +320,23 @@ struct amdgpu_vm_manager {
|
||||
*/
|
||||
struct idr pasid_idr;
|
||||
spinlock_t pasid_lock;
|
||||
|
||||
/* counter of mapped memory through xgmi */
|
||||
uint32_t xgmi_map_counter;
|
||||
struct mutex lock_pstate;
|
||||
};
|
||||
|
||||
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
|
||||
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
|
||||
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
|
||||
|
||||
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
|
||||
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
|
||||
|
||||
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
|
||||
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
|
||||
|
||||
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
|
||||
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
int vm_context, unsigned int pasid);
|
||||
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
|
||||
@@ -303,9 +349,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm);
|
||||
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
|
||||
int (*callback)(void *p, struct amdgpu_bo *bo),
|
||||
void *param);
|
||||
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
uint64_t saddr, uint64_t size);
|
||||
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
|
||||
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
@@ -319,6 +362,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
|
||||
bool clear);
|
||||
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
|
||||
struct amdgpu_bo *bo, bool evicted);
|
||||
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
|
||||
struct amdgpu_bo *bo);
|
||||
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
|
||||
@@ -358,11 +402,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
|
||||
|
||||
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
|
||||
int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
|
||||
|
||||
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
|
||||
|
||||
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
|
||||
|
||||
#endif
|
||||
|
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "amdgpu_vm.h"
|
||||
#include "amdgpu_object.h"
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
/**
|
||||
* amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
|
||||
*
|
||||
* @table: newly allocated or validated PD/PT
|
||||
*/
|
||||
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
|
||||
{
|
||||
return amdgpu_bo_kmap(table, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_cpu_prepare - prepare page table update with the CPU
|
||||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @owner: owner we need to sync to
|
||||
* @exclusive: exclusive move fence we need to sync to
|
||||
*
|
||||
* Returns:
|
||||
* Negative errno, 0 for success.
|
||||
*/
|
||||
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
|
||||
struct dma_fence *exclusive)
|
||||
{
|
||||
int r;
|
||||
|
||||
/* Wait for PT BOs to be idle. PTs share the same resv. object
|
||||
* as the root PD BO
|
||||
*/
|
||||
r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
|
||||
if (unlikely(r))
|
||||
return r;
|
||||
|
||||
/* Wait for any BO move to be completed */
|
||||
if (exclusive) {
|
||||
r = dma_fence_wait(exclusive, true);
|
||||
if (unlikely(r))
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_cpu_update - helper to update page tables via CPU
|
||||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @bo: PD/PT to update
|
||||
* @pe: kmap addr of the page entry
|
||||
* @addr: dst addr to write into pe
|
||||
* @count: number of page entries to update
|
||||
* @incr: increase next addr by incr bytes
|
||||
* @flags: hw access flags
|
||||
*
|
||||
* Write count number of PT/PD entries directly.
|
||||
*/
|
||||
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
|
||||
struct amdgpu_bo *bo, uint64_t pe,
|
||||
uint64_t addr, unsigned count, uint32_t incr,
|
||||
uint64_t flags)
|
||||
{
|
||||
unsigned int i;
|
||||
uint64_t value;
|
||||
|
||||
pe += (unsigned long)amdgpu_bo_kptr(bo);
|
||||
|
||||
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
value = p->pages_addr ?
|
||||
amdgpu_vm_map_gart(p->pages_addr, addr) :
|
||||
addr;
|
||||
amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
|
||||
i, value, flags);
|
||||
addr += incr;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_cpu_commit - commit page table update to the HW
|
||||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @fence: unused
|
||||
*
|
||||
* Make sure that the hardware sees the page table updates.
|
||||
*/
|
||||
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
/* Flush HDP */
|
||||
mb();
|
||||
amdgpu_asic_flush_hdp(p->adev, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
|
||||
.map_table = amdgpu_vm_cpu_map_table,
|
||||
.prepare = amdgpu_vm_cpu_prepare,
|
||||
.update = amdgpu_vm_cpu_update,
|
||||
.commit = amdgpu_vm_cpu_commit
|
||||
};
|
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
/*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "amdgpu_vm.h"
|
||||
#include "amdgpu_job.h"
|
||||
#include "amdgpu_object.h"
|
||||
#include "amdgpu_trace.h"
|
||||
|
||||
#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
|
||||
#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
|
||||
|
||||
/**
|
||||
* amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
|
||||
*
|
||||
* @table: newly allocated or validated PD/PT
|
||||
*/
|
||||
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_ttm_alloc_gart(&table->tbo);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (table->shadow)
|
||||
r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
|
||||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @owner: owner we need to sync to
|
||||
* @exclusive: exclusive move fence we need to sync to
|
||||
*
|
||||
* Returns:
|
||||
* Negative errno, 0 for success.
|
||||
*/
|
||||
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
|
||||
void *owner, struct dma_fence *exclusive)
|
||||
{
|
||||
struct amdgpu_bo *root = p->vm->root.base.bo;
|
||||
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
|
||||
int r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
|
||||
owner, false);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
p->num_dw_left = ndw;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_sdma_commit - commit SDMA command submission
|
||||
*
|
||||
* @p: see amdgpu_vm_update_params definition
|
||||
* @fence: resulting fence
|
||||
*
|
||||
* Returns:
|
||||
* Negativ errno, 0 for success.
|
||||
*/
|
||||
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
struct amdgpu_bo *root = p->vm->root.base.bo;
|
||||
struct amdgpu_ib *ib = p->job->ibs;
|
||||
struct amdgpu_ring *ring;
|
||||
struct dma_fence *f;
|
||||
int r;
|
||||
|
||||
ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
|
||||
|
||||
WARN_ON(ib->length_dw == 0);
|
||||
amdgpu_ring_pad_ib(ring, ib);
|
||||
WARN_ON(ib->length_dw > p->num_dw_left);
|
||||
r = amdgpu_job_submit(p->job, &p->vm->entity,
|
||||
AMDGPU_FENCE_OWNER_VM, &f);
|
||||
if (r)
|
||||
goto error;
|
||||
|
||||
amdgpu_bo_fence(root, f, true);
|
||||
if (fence)
|
||||
swap(*fence, f);
|
||||
dma_fence_put(f);
|
||||
return 0;
|
||||
|
||||
error:
|
||||
amdgpu_job_free(p->job);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
|
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
                                     struct amdgpu_bo *bo, uint64_t pe,
                                     unsigned count)
{
    struct amdgpu_ib *ib = p->job->ibs;
    uint64_t src = ib->gpu_addr;

    src += p->num_dw_left * 4;

    pe += amdgpu_bo_gpu_offset(bo);
    trace_amdgpu_vm_copy_ptes(pe, src, count);

    amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
                                    struct amdgpu_bo *bo, uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint64_t flags)
{
    struct amdgpu_ib *ib = p->job->ibs;

    pe += amdgpu_bo_gpu_offset(bo);
    trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
    if (count < 3) {
        amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
                            count, incr);
    } else {
        amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
                              count, incr, flags);
    }
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, setup mapping buffer on demand and write commands to
 * the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                                 struct amdgpu_bo *bo, uint64_t pe,
                                 uint64_t addr, unsigned count, uint32_t incr,
                                 uint64_t flags)
{
    unsigned int i, ndw, nptes;
    uint64_t *pte;
    int r;

    do {
        ndw = p->num_dw_left;
        ndw -= p->job->ibs->length_dw;

        if (ndw < 32) {
            r = amdgpu_vm_sdma_commit(p, NULL);
            if (r)
                return r;

            /* estimate how many dw we need */
            ndw = 32;
            if (p->pages_addr)
                ndw += count * 2;
            ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
            ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

            r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
            if (r)
                return r;

            p->num_dw_left = ndw;
        }

        if (!p->pages_addr) {
            /* set page commands needed */
            if (bo->shadow)
                amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
                                        count, incr, flags);
            amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
                                    incr, flags);
            return 0;
        }

        /* copy commands needed */
        ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
            (bo->shadow ? 2 : 1);

        /* for padding */
        ndw -= 7;

        nptes = min(count, ndw / 2);

        /* Put the PTEs at the end of the IB. */
        p->num_dw_left -= nptes * 2;
        pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
        for (i = 0; i < nptes; ++i, addr += incr) {
            pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
            pte[i] |= flags;
        }

        if (bo->shadow)
            amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
        amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

        pe += nptes * 8;
        count -= nptes;
    } while (count);

    return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
    .map_table = amdgpu_vm_sdma_map_table,
    .prepare = amdgpu_vm_sdma_prepare,
    .update = amdgpu_vm_sdma_update,
    .commit = amdgpu_vm_sdma_commit
};
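The table above is the SDMA implementation of the VM page-table update interface; the same struct amdgpu_vm_update_funcs contract is also served by a CPU-based backend elsewhere in this series. A minimal sketch of how a caller might select between the two, assuming a per-VM update_funcs pointer and an amdgpu_vm_cpu_funcs table exist as introduced alongside this code (both names are assumptions here, not confirmed by the lines above):

    /*
     * Illustrative sketch only, not part of the patch: pick a page-table
     * update backend for a VM. amdgpu_vm_sdma_funcs is defined above;
     * vm->update_funcs and amdgpu_vm_cpu_funcs are assumed from the
     * surrounding series.
     */
    static void example_pick_update_backend(struct amdgpu_vm *vm,
                                            bool use_cpu_for_update)
    {
        if (use_cpu_for_update)
            vm->update_funcs = &amdgpu_vm_cpu_funcs;   /* assumed table */
        else
            vm->update_funcs = &amdgpu_vm_sdma_funcs;  /* defined above */
    }

Callers then go through the same prepare/update/commit sequence regardless of which backend was chosen.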
@@ -32,6 +32,85 @@ struct amdgpu_vram_mgr {
    atomic64_t vis_usage;
};

/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total is used for this and returns the total
 * amount of VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;

    return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}

/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * visible VRAM available on the device.
 * The file mem_info_vis_vram_total is used for this and returns the total
 * amount of visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;

    return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}

/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently used on the device.
 * The file mem_info_vram_used is used for this and returns the total
 * amount of currently used VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;

    return snprintf(buf, PAGE_SIZE, "%llu\n",
            amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of visible
 * VRAM currently used on the device.
 * The file mem_info_vis_vram_used is used for this and returns the total
 * amount of currently used visible VRAM in bytes.
 */
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = ddev->dev_private;

    return snprintf(buf, PAGE_SIZE, "%llu\n",
            amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
           amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
           amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
           amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
           amdgpu_mem_info_vis_vram_used_show, NULL);

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
@@ -43,7 +122,9 @@ struct amdgpu_vram_mgr {
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
                unsigned long p_size)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
    struct amdgpu_vram_mgr *mgr;
    int ret;

    mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
    if (!mgr)
@@ -52,6 +133,29 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
    drm_mm_init(&mgr->mm, 0, p_size);
    spin_lock_init(&mgr->lock);
    man->priv = mgr;

    /* Add the VRAM usage reporting sysfs files */
    ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
    if (ret) {
        DRM_ERROR("Failed to create device file mem_info_vram_total\n");
        return ret;
    }
    ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
    if (ret) {
        DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
        return ret;
    }
    ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
    if (ret) {
        DRM_ERROR("Failed to create device file mem_info_vram_used\n");
        return ret;
    }
    ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
    if (ret) {
        DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
        return ret;
    }

    return 0;
}

@@ -65,6 +169,7 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
 */
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
    struct amdgpu_vram_mgr *mgr = man->priv;

    spin_lock(&mgr->lock);
@@ -72,6 +177,10 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
    spin_unlock(&mgr->lock);
    kfree(mgr);
    man->priv = NULL;
    device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
    device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
    device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
    device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
    return 0;
}
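The four attributes above surface as plain read-only files under the DRM device's sysfs directory. A small userspace sketch that reads two of them, assuming the GPU is card0 under the usual /sys/class/drm layout (the path and card index are assumptions for illustration, not part of the patch):

    #include <stdio.h>

    /* Illustrative only: read the new VRAM counters exposed by amdgpu. */
    int main(void)
    {
        unsigned long long vram_total = 0, vram_used = 0;
        FILE *f;

        f = fopen("/sys/class/drm/card0/device/mem_info_vram_total", "r");
        if (f) {
            fscanf(f, "%llu", &vram_total);
            fclose(f);
        }

        f = fopen("/sys/class/drm/card0/device/mem_info_vram_used", "r");
        if (f) {
            fscanf(f, "%llu", &vram_used);
            fclose(f);
        }

        printf("VRAM used: %llu of %llu bytes\n", vram_used, vram_total);
        return 0;
    }

Each file returns a single decimal value in bytes, matching the snprintf formats in the show callbacks above.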
|
@@ -24,6 +24,7 @@
|
||||
#include <linux/list.h>
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_xgmi.h"
|
||||
#include "amdgpu_smu.h"
|
||||
|
||||
|
||||
static DEFINE_MUTEX(xgmi_mutex);
|
||||
@@ -34,12 +35,132 @@ static DEFINE_MUTEX(xgmi_mutex);
|
||||
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
|
||||
static unsigned hive_count = 0;
|
||||
|
||||
|
||||
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
|
||||
{
|
||||
return &hive->device_list;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct amdgpu_hive_info *hive =
|
||||
container_of(attr, struct amdgpu_hive_info, dev_attr);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
|
||||
}
|
||||
|
||||
static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
|
||||
struct amdgpu_hive_info *hive)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(hive->kobj))
|
||||
return -EINVAL;
|
||||
|
||||
hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
|
||||
if (!hive->kobj) {
|
||||
dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hive->dev_attr = (struct device_attribute) {
|
||||
.attr = {
|
||||
.name = "xgmi_hive_id",
|
||||
.mode = S_IRUGO,
|
||||
|
||||
},
|
||||
.show = amdgpu_xgmi_show_hive_id,
|
||||
};
|
||||
|
||||
ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
|
||||
kobject_del(hive->kobj);
|
||||
kobject_put(hive->kobj);
|
||||
hive->kobj = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
|
||||
struct amdgpu_hive_info *hive)
|
||||
{
|
||||
sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
|
||||
kobject_del(hive->kobj);
|
||||
kobject_put(hive->kobj);
|
||||
hive->kobj = NULL;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = ddev->dev_private;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
|
||||
|
||||
}
|
||||
|
||||
|
||||
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
|
||||
|
||||
|
||||
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
|
||||
struct amdgpu_hive_info *hive)
|
||||
{
|
||||
int ret = 0;
|
||||
char node[10] = { 0 };
|
||||
|
||||
/* Create xgmi device id file */
|
||||
ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Create sysfs link to hive info folder on the first device */
|
||||
if (adev != hive->adev) {
|
||||
ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
|
||||
"xgmi_hive_info");
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "XGMI: Failed to create link to hive info");
|
||||
goto remove_file;
|
||||
}
|
||||
}
|
||||
|
||||
sprintf(node, "node%d", hive->number_devices);
|
||||
/* Create sysfs link form the hive folder to yourself */
|
||||
ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "XGMI: Failed to create link from hive info");
|
||||
goto remove_link;
|
||||
}
|
||||
|
||||
goto success;
|
||||
|
||||
|
||||
remove_link:
|
||||
sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
|
||||
|
||||
remove_file:
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
|
||||
|
||||
success:
|
||||
return ret;
|
||||
}
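Taken together, the XGMI sysfs helpers above publish a small topology view: a hive folder on the first device, per-device node links inside it, and a device-id file plus hive link on every member. A rough sketch of the layout they aim to produce for a two-GPU hive, written as a comment; the exact paths are assumptions for illustration, not taken verbatim from the patch:

    /*
     * Illustrative layout only (assumed):
     *
     *   /sys/bus/pci/devices/<first-gpu>/xgmi_hive_info/
     *       xgmi_hive_id          - hive ID, from amdgpu_xgmi_show_hive_id()
     *       node1 -> <first-gpu>  - per-device links named "node%d"
     *       node2 -> <second-gpu>
     *   /sys/bus/pci/devices/<each-gpu>/
     *       xgmi_device_id        - node ID, from amdgpu_xgmi_show_device_id()
     *       xgmi_hive_info -> ... - link back to the hive folder
     *                               (created on non-first devices only)
     */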
|
||||
|
||||
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
|
||||
struct amdgpu_hive_info *hive)
|
||||
{
|
||||
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
|
||||
sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
|
||||
sysfs_remove_link(hive->kobj, adev->ddev->unique);
|
||||
}
|
||||
|
||||
|
||||
|
||||
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
|
||||
{
|
||||
int i;
|
||||
@@ -66,18 +187,50 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
|
||||
|
||||
/* initialize new hive if not exist */
|
||||
tmp = &xgmi_hives[hive_count++];
|
||||
|
||||
if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
|
||||
mutex_unlock(&xgmi_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmp->adev = adev;
|
||||
tmp->hive_id = adev->gmc.xgmi.hive_id;
|
||||
INIT_LIST_HEAD(&tmp->device_list);
|
||||
mutex_init(&tmp->hive_lock);
|
||||
mutex_init(&tmp->reset_lock);
|
||||
|
||||
if (lock)
|
||||
mutex_lock(&tmp->hive_lock);
|
||||
|
||||
tmp->pstate = -1;
|
||||
mutex_unlock(&xgmi_mutex);
|
||||
|
||||
return tmp;
|
||||
}
|
||||
|
||||
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
|
||||
{
|
||||
int ret = 0;
|
||||
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
|
||||
|
||||
if (!hive)
|
||||
return 0;
|
||||
|
||||
if (hive->pstate == pstate)
|
||||
return 0;
|
||||
|
||||
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
|
||||
|
||||
if (is_support_sw_smu(adev))
|
||||
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
|
||||
if (ret)
|
||||
dev_err(adev->dev,
|
||||
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
|
||||
adev->gmc.xgmi.node_id,
|
||||
adev->gmc.xgmi.hive_id, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
@@ -156,8 +309,17 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
|
||||
break;
|
||||
}
|
||||
|
||||
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
|
||||
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
|
||||
if (!ret)
|
||||
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
|
||||
|
||||
if (!ret)
|
||||
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
|
||||
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
|
||||
else
|
||||
dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
|
||||
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
|
||||
ret);
|
||||
|
||||
|
||||
mutex_unlock(&hive->hive_lock);
|
||||
exit:
|
||||
@@ -176,9 +338,11 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
|
||||
return;
|
||||
|
||||
if (!(hive->number_devices--)) {
|
||||
amdgpu_xgmi_sysfs_destroy(adev, hive);
|
||||
mutex_destroy(&hive->hive_lock);
|
||||
mutex_destroy(&hive->reset_lock);
|
||||
} else {
|
||||
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
|
||||
mutex_unlock(&hive->hive_lock);
|
||||
}
|
||||
}
|
||||
|
@@ -29,13 +29,25 @@ struct amdgpu_hive_info {
|
||||
struct list_head device_list;
|
||||
struct psp_xgmi_topology_info topology_info;
|
||||
int number_devices;
|
||||
struct mutex hive_lock,
|
||||
reset_lock;
|
||||
struct mutex hive_lock, reset_lock;
|
||||
struct kobject *kobj;
|
||||
struct device_attribute dev_attr;
|
||||
struct amdgpu_device *adev;
|
||||
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
|
||||
};
|
||||
|
||||
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
|
||||
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
|
||||
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
|
||||
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
|
||||
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
|
||||
|
||||
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
|
||||
struct amdgpu_device *bo_adev)
|
||||
{
|
||||
return (adev != bo_adev &&
|
||||
adev->gmc.xgmi.hive_id &&
|
||||
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle)
 		r = amdgpu_ring_init(adev, ring, 1024,
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 :
-				     AMDGPU_SDMA_IRQ_TRAP1);
+				     AMDGPU_SDMA_IRQ_INSTANCE0 :
+				     AMDGPU_SDMA_IRQ_INSTANCE1);
 		if (r)
 			return r;
 	}
@@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
 	u32 sdma_cntl;

 	switch (type) {
-	case AMDGPU_SDMA_IRQ_TRAP0:
+	case AMDGPU_SDMA_IRQ_INSTANCE0:
 		switch (state) {
 		case AMDGPU_IRQ_STATE_DISABLE:
 			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
 			break;
 		}
 		break;
-	case AMDGPU_SDMA_IRQ_TRAP1:
+	case AMDGPU_SDMA_IRQ_INSTANCE1:
 		switch (state) {
 		case AMDGPU_IRQ_STATE_DISABLE:
 			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
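The rename from AMDGPU_SDMA_IRQ_TRAP0/1 to AMDGPU_SDMA_IRQ_INSTANCE0/1 makes explicit that the trap IRQ type is really an SDMA engine index. A hedged sketch of the arithmetic this naming allows; the helper name below is made up for illustration and is not part of this patch:

    /*
     * Illustrative sketch only: with the IRQ types named after SDMA
     * instances, the type value maps directly to an engine index.
     */
    static inline unsigned example_sdma_instance_from_irq(unsigned type)
    {
        return type - AMDGPU_SDMA_IRQ_INSTANCE0;  /* 0 or 1 on CIK */
    }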
@@ -782,6 +782,25 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
|
||||
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
|
||||
tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
ARRAY_MODE(ARRAY_1D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_8x16);
|
||||
tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
|
||||
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_16_BANK) |
|
||||
TILE_SPLIT(split_equal_to_row_size);
|
||||
tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
ARRAY_MODE(ARRAY_2D_TILED_THICK) |
|
||||
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
|
||||
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
|
||||
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
|
||||
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
|
||||
NUM_BANKS(ADDR_SURF_16_BANK) |
|
||||
TILE_SPLIT(split_equal_to_row_size);
|
||||
tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
|
||||
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
|
||||
|
@@ -3236,6 +3236,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
|
||||
dev_warn(adev->dev,
|
||||
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
|
||||
adev->asic_type);
|
||||
/* fall through */
|
||||
|
||||
case CHIP_CARRIZO:
|
||||
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
|
||||
|
@@ -40,6 +40,8 @@
|
||||
|
||||
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
|
||||
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
#define GFX9_NUM_GFX_RINGS 1
|
||||
#define GFX9_MEC_HPD_SIZE 4096
|
||||
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
|
||||
@@ -576,6 +578,27 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_VEGA10:
|
||||
case CHIP_VEGA12:
|
||||
case CHIP_VEGA20:
|
||||
break;
|
||||
case CHIP_RAVEN:
|
||||
if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
|
||||
break;
|
||||
if ((adev->gfx.rlc_fw_version < 531) ||
|
||||
(adev->gfx.rlc_fw_version == 53815) ||
|
||||
(adev->gfx.rlc_feature_version < 1) ||
|
||||
!adev->gfx.rlc.is_rlc_v2_1)
|
||||
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
{
|
||||
const char *chip_name;
|
||||
@@ -828,6 +851,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
out:
|
||||
gfx_v9_0_check_if_need_gfxoff(adev);
|
||||
gfx_v9_0_check_fw_write_wait(adev);
|
||||
if (err) {
|
||||
dev_err(adev->dev,
|
||||
@@ -1639,6 +1663,18 @@ static int gfx_v9_0_sw_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* ECC error */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
|
||||
&adev->gfx.cp_ecc_error_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* FUE error */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
|
||||
&adev->gfx.cp_ecc_error_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
|
||||
|
||||
gfx_v9_0_scratch_init(adev);
|
||||
@@ -1731,6 +1767,20 @@ static int gfx_v9_0_sw_fini(void *handle)
|
||||
int i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
|
||||
adev->gfx.ras_if) {
|
||||
struct ras_common_if *ras_if = adev->gfx.ras_if;
|
||||
struct ras_ih_if ih_info = {
|
||||
.head = *ras_if,
|
||||
};
|
||||
|
||||
amdgpu_ras_debugfs_remove(adev, ras_if);
|
||||
amdgpu_ras_sysfs_remove(adev, ras_if);
|
||||
amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
|
||||
amdgpu_ras_feature_enable(adev, ras_if, 0);
|
||||
kfree(ras_if);
|
||||
}
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
|
||||
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
|
||||
@@ -3303,6 +3353,7 @@ static int gfx_v9_0_hw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
|
||||
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
|
||||
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
|
||||
|
||||
@@ -3492,6 +3543,80 @@ static int gfx_v9_0_early_init(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry);
|
||||
|
||||
static int gfx_v9_0_ecc_late_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct ras_common_if **ras_if = &adev->gfx.ras_if;
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = gfx_v9_0_process_ras_data_cb,
|
||||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "gfx_err_count",
|
||||
.debugfs_name = "gfx_err_inject",
|
||||
};
|
||||
struct ras_common_if ras_block = {
|
||||
.block = AMDGPU_RAS_BLOCK__GFX,
|
||||
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
|
||||
.sub_block_index = 0,
|
||||
.name = "gfx",
|
||||
};
|
||||
int r;
|
||||
|
||||
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
|
||||
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (*ras_if)
|
||||
goto resume;
|
||||
|
||||
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
|
||||
if (!*ras_if)
|
||||
return -ENOMEM;
|
||||
|
||||
**ras_if = ras_block;
|
||||
|
||||
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
|
||||
if (r)
|
||||
goto feature;
|
||||
|
||||
ih_info.head = **ras_if;
|
||||
fs_info.head = **ras_if;
|
||||
|
||||
r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
|
||||
if (r)
|
||||
goto interrupt;
|
||||
|
||||
r = amdgpu_ras_debugfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto debugfs;
|
||||
|
||||
r = amdgpu_ras_sysfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto sysfs;
|
||||
resume:
|
||||
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
|
||||
if (r)
|
||||
goto irq;
|
||||
|
||||
return 0;
|
||||
irq:
|
||||
amdgpu_ras_sysfs_remove(adev, *ras_if);
|
||||
sysfs:
|
||||
amdgpu_ras_debugfs_remove(adev, *ras_if);
|
||||
debugfs:
|
||||
amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
|
||||
interrupt:
|
||||
amdgpu_ras_feature_enable(adev, *ras_if, 0);
|
||||
feature:
|
||||
kfree(*ras_if);
|
||||
*ras_if = NULL;
|
||||
return -EINVAL;
|
||||
}
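The error handling in gfx_v9_0_ecc_late_init() is ordered so that each failure label undoes only what has already succeeded. A condensed restatement of that ordering, as a comment; it mirrors the function above rather than adding any new driver API:

    /*
     * Condensed sketch (illustrative, mirrors gfx_v9_0_ecc_late_init()):
     * each step registers one facility and each error label unwinds the
     * steps that already succeeded, in reverse order.
     *
     *   enable RAS feature        -> on failure: free ras_if        (feature:)
     *   add RAS interrupt handler -> on failure: disable feature    (interrupt:)
     *   create debugfs node       -> on failure: remove IH handler  (debugfs:)
     *   create sysfs node         -> on failure: remove debugfs     (sysfs:)
     *   get cp_ecc_error_irq      -> on failure: remove sysfs       (irq:)
     */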
|
||||
|
||||
static int gfx_v9_0_late_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@@ -3505,6 +3630,10 @@ static int gfx_v9_0_late_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = gfx_v9_0_ecc_late_init(handle);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4541,6 +4670,45 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
|
||||
WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
|
||||
CP_ECC_ERROR_INT_ENABLE, 1)
|
||||
|
||||
#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
|
||||
WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
|
||||
CP_ECC_ERROR_INT_ENABLE, 0)
|
||||
|
||||
static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
|
||||
CP_ECC_ERROR_INT_ENABLE, 0);
|
||||
DISABLE_ECC_ON_ME_PIPE(1, 0);
|
||||
DISABLE_ECC_ON_ME_PIPE(1, 1);
|
||||
DISABLE_ECC_ON_ME_PIPE(1, 2);
|
||||
DISABLE_ECC_ON_ME_PIPE(1, 3);
|
||||
break;
|
||||
|
||||
case AMDGPU_IRQ_STATE_ENABLE:
|
||||
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
|
||||
CP_ECC_ERROR_INT_ENABLE, 1);
|
||||
ENABLE_ECC_ON_ME_PIPE(1, 0);
|
||||
ENABLE_ECC_ON_ME_PIPE(1, 1);
|
||||
ENABLE_ECC_ON_ME_PIPE(1, 2);
|
||||
ENABLE_ECC_ON_ME_PIPE(1, 3);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *src,
|
||||
unsigned type,
|
||||
@@ -4657,6 +4825,34 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
/* TODO ue will trigger an interrupt. */
|
||||
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
|
||||
amdgpu_ras_reset_gpu(adev, 0);
|
||||
return AMDGPU_RAS_UE;
|
||||
}
|
||||
|
||||
static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
struct ras_common_if *ras_if = adev->gfx.ras_if;
|
||||
struct ras_dispatch_if ih_data = {
|
||||
.entry = entry,
|
||||
};
|
||||
|
||||
if (!ras_if)
|
||||
return 0;
|
||||
|
||||
ih_data.head = *ras_if;
|
||||
|
||||
DRM_ERROR("CP ECC ERROR IRQ\n");
|
||||
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
|
||||
.name = "gfx_v9_0",
|
||||
.early_init = gfx_v9_0_early_init,
|
||||
@@ -4818,6 +5014,12 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
|
||||
.process = gfx_v9_0_priv_inst_irq,
|
||||
};
|
||||
|
||||
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
|
||||
.set = gfx_v9_0_set_cp_ecc_error_state,
|
||||
.process = gfx_v9_0_cp_ecc_error_irq,
|
||||
};
|
||||
|
||||
|
||||
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
|
||||
@@ -4828,6 +5030,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
|
||||
adev->gfx.priv_inst_irq.num_types = 1;
|
||||
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
|
||||
|
||||
adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
|
||||
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
|
||||
}
|
||||
|
||||
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
|
||||
|
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	/* XXX for emulation, Refer to closed source code.*/
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 			    0);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
 	WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
@@ -236,7 +236,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 			    block_size);
 	/* Send no-retry XNACK on fault to suppress VM fault storm. */
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
-			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
 	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
 	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
 	WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
|
||||
|
@@ -225,7 +225,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||
base <<= 24;
|
||||
|
||||
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
||||
amdgpu_gmc_vram_location(adev, mc, base);
|
||||
amdgpu_gmc_gart_location(adev, mc);
|
||||
}
|
||||
|
||||
@@ -383,20 +383,6 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
|
||||
return pd_addr;
|
||||
}
|
||||
|
||||
static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags)
|
||||
{
|
||||
void __iomem *ptr = (void *)cpu_pt_addr;
|
||||
uint64_t value;
|
||||
|
||||
value = addr & 0xFFFFFFFFFFFFF000ULL;
|
||||
value |= flags;
|
||||
writeq(value, ptr + (gpu_page_idx * 8));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
|
||||
uint32_t flags)
|
||||
{
|
||||
@@ -886,7 +872,7 @@ static int gmc_v6_0_sw_init(void *handle)
|
||||
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
|
||||
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
|
||||
}
|
||||
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
|
||||
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
|
||||
|
||||
r = gmc_v6_0_init_microcode(adev);
|
||||
if (r) {
|
||||
@@ -1169,7 +1155,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
|
||||
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
|
||||
.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
|
||||
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
|
||||
.set_pte_pde = gmc_v6_0_set_pte_pde,
|
||||
.set_prt = gmc_v6_0_set_prt,
|
||||
.get_vm_pde = gmc_v6_0_get_vm_pde,
|
||||
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
|
||||
|
@@ -242,7 +242,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||
base <<= 24;
|
||||
|
||||
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
||||
amdgpu_gmc_vram_location(adev, mc, base);
|
||||
amdgpu_gmc_gart_location(adev, mc);
|
||||
}
|
||||
|
||||
@@ -460,31 +460,6 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
|
||||
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
|
||||
}
|
||||
|
||||
/**
|
||||
* gmc_v7_0_set_pte_pde - update the page tables using MMIO
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @cpu_pt_addr: cpu address of the page table
|
||||
* @gpu_page_idx: entry in the page table to update
|
||||
* @addr: dst addr to write into pte/pde
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using the CPU.
|
||||
*/
|
||||
static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags)
|
||||
{
|
||||
void __iomem *ptr = (void *)cpu_pt_addr;
|
||||
uint64_t value;
|
||||
|
||||
value = addr & 0xFFFFFFFFFFFFF000ULL;
|
||||
value |= flags;
|
||||
writeq(value, ptr + (gpu_page_idx * 8));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
|
||||
uint32_t flags)
|
||||
{
|
||||
@@ -1030,7 +1005,7 @@ static int gmc_v7_0_sw_init(void *handle)
|
||||
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
|
||||
pr_warn("amdgpu: No coherent DMA available\n");
|
||||
}
|
||||
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
|
||||
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
|
||||
|
||||
r = gmc_v7_0_init_microcode(adev);
|
||||
if (r) {
|
||||
@@ -1376,7 +1351,6 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
|
||||
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
|
||||
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
|
||||
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
|
||||
.set_pte_pde = gmc_v7_0_set_pte_pde,
|
||||
.set_prt = gmc_v7_0_set_prt,
|
||||
.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
|
||||
.get_vm_pde = gmc_v7_0_get_vm_pde
|
||||
|
@@ -433,7 +433,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
|
||||
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
|
||||
base <<= 24;
|
||||
|
||||
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
|
||||
amdgpu_gmc_vram_location(adev, mc, base);
|
||||
amdgpu_gmc_gart_location(adev, mc);
|
||||
}
|
||||
|
||||
@@ -662,50 +662,26 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
|
||||
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
|
||||
}
|
||||
|
||||
/**
|
||||
* gmc_v8_0_set_pte_pde - update the page tables using MMIO
|
||||
/*
|
||||
* PTE format on VI:
|
||||
* 63:40 reserved
|
||||
* 39:12 4k physical page base address
|
||||
* 11:7 fragment
|
||||
* 6 write
|
||||
* 5 read
|
||||
* 4 exe
|
||||
* 3 reserved
|
||||
* 2 snooped
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @cpu_pt_addr: cpu address of the page table
|
||||
* @gpu_page_idx: entry in the page table to update
|
||||
* @addr: dst addr to write into pte/pde
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using the CPU.
|
||||
* PDE format on VI:
|
||||
* 63:59 block fragment size
|
||||
* 58:40 reserved
|
||||
* 39:1 physical base address of PTE
|
||||
* bits 5:1 must be 0.
|
||||
* 0 valid
|
||||
*/
|
||||
static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags)
|
||||
{
|
||||
void __iomem *ptr = (void *)cpu_pt_addr;
|
||||
uint64_t value;
|
||||
|
||||
/*
|
||||
* PTE format on VI:
|
||||
* 63:40 reserved
|
||||
* 39:12 4k physical page base address
|
||||
* 11:7 fragment
|
||||
* 6 write
|
||||
* 5 read
|
||||
* 4 exe
|
||||
* 3 reserved
|
||||
* 2 snooped
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*
|
||||
* PDE format on VI:
|
||||
* 63:59 block fragment size
|
||||
* 58:40 reserved
|
||||
* 39:1 physical base address of PTE
|
||||
* bits 5:1 must be 0.
|
||||
* 0 valid
|
||||
*/
|
||||
value = addr & 0x000000FFFFFFF000ULL;
|
||||
value |= flags;
|
||||
writeq(value, ptr + (gpu_page_idx * 8));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
|
||||
uint32_t flags)
|
||||
@@ -1155,7 +1131,7 @@ static int gmc_v8_0_sw_init(void *handle)
|
||||
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
|
||||
pr_warn("amdgpu: No coherent DMA available\n");
|
||||
}
|
||||
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
|
||||
adev->need_swiotlb = drm_need_swiotlb(dma_bits);
|
||||
|
||||
r = gmc_v8_0_init_microcode(adev);
|
||||
if (r) {
|
||||
@@ -1743,7 +1719,6 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
|
||||
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
|
||||
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
|
||||
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
|
||||
.set_pte_pde = gmc_v8_0_set_pte_pde,
|
||||
.set_prt = gmc_v8_0_set_prt,
|
||||
.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
|
||||
.get_vm_pde = gmc_v8_0_get_vm_pde
|
||||
|
@@ -47,6 +47,8 @@
|
||||
|
||||
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
|
||||
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
/* add these here since we already include dce12 headers and these are for DCN */
|
||||
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
|
||||
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
|
||||
@@ -84,121 +86,182 @@ static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
|
||||
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
|
||||
};
|
||||
|
||||
/* Ecc related register addresses, (BASE + reg offset) */
|
||||
/* Universal Memory Controller caps (may be fused). */
|
||||
/* UMCCH:UmcLocalCap */
|
||||
#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
|
||||
#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
|
||||
#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
|
||||
#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
|
||||
#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
|
||||
#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
|
||||
#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
|
||||
#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
|
||||
#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
|
||||
#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
|
||||
#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
|
||||
#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
|
||||
#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
|
||||
#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
|
||||
#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
|
||||
#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
|
||||
|
||||
/* Universal Memory Controller Channel config. */
|
||||
/* UMCCH:UMC_CONFIG */
|
||||
#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
|
||||
#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
|
||||
#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
|
||||
|
||||
/* Universal Memory Controller Channel Ecc config. */
|
||||
/* UMCCH:EccCtrl */
|
||||
#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
|
||||
#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
|
||||
#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
|
||||
#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
|
||||
#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
|
||||
#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
|
||||
#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
|
||||
#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
|
||||
#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
|
||||
#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
|
||||
#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
|
||||
#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
|
||||
#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
|
||||
#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
|
||||
#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
|
||||
#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
|
||||
|
||||
static const uint32_t ecc_umclocalcap_addrs[] = {
|
||||
UMCLOCALCAPS_ADDR0,
|
||||
UMCLOCALCAPS_ADDR1,
|
||||
UMCLOCALCAPS_ADDR2,
|
||||
UMCLOCALCAPS_ADDR3,
|
||||
UMCLOCALCAPS_ADDR4,
|
||||
UMCLOCALCAPS_ADDR5,
|
||||
UMCLOCALCAPS_ADDR6,
|
||||
UMCLOCALCAPS_ADDR7,
|
||||
UMCLOCALCAPS_ADDR8,
|
||||
UMCLOCALCAPS_ADDR9,
|
||||
UMCLOCALCAPS_ADDR10,
|
||||
UMCLOCALCAPS_ADDR11,
|
||||
UMCLOCALCAPS_ADDR12,
|
||||
UMCLOCALCAPS_ADDR13,
|
||||
UMCLOCALCAPS_ADDR14,
|
||||
UMCLOCALCAPS_ADDR15,
|
||||
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
|
||||
(0x000143c0 + 0x00000000),
|
||||
(0x000143c0 + 0x00000800),
|
||||
(0x000143c0 + 0x00001000),
|
||||
(0x000143c0 + 0x00001800),
|
||||
(0x000543c0 + 0x00000000),
|
||||
(0x000543c0 + 0x00000800),
|
||||
(0x000543c0 + 0x00001000),
|
||||
(0x000543c0 + 0x00001800),
|
||||
(0x000943c0 + 0x00000000),
|
||||
(0x000943c0 + 0x00000800),
|
||||
(0x000943c0 + 0x00001000),
|
||||
(0x000943c0 + 0x00001800),
|
||||
(0x000d43c0 + 0x00000000),
|
||||
(0x000d43c0 + 0x00000800),
|
||||
(0x000d43c0 + 0x00001000),
|
||||
(0x000d43c0 + 0x00001800),
|
||||
(0x001143c0 + 0x00000000),
|
||||
(0x001143c0 + 0x00000800),
|
||||
(0x001143c0 + 0x00001000),
|
||||
(0x001143c0 + 0x00001800),
|
||||
(0x001543c0 + 0x00000000),
|
||||
(0x001543c0 + 0x00000800),
|
||||
(0x001543c0 + 0x00001000),
|
||||
(0x001543c0 + 0x00001800),
|
||||
(0x001943c0 + 0x00000000),
|
||||
(0x001943c0 + 0x00000800),
|
||||
(0x001943c0 + 0x00001000),
|
||||
(0x001943c0 + 0x00001800),
|
||||
(0x001d43c0 + 0x00000000),
|
||||
(0x001d43c0 + 0x00000800),
|
||||
(0x001d43c0 + 0x00001000),
|
||||
(0x001d43c0 + 0x00001800),
|
||||
};
|
||||
|
||||
static const uint32_t ecc_umcch_umc_config_addrs[] = {
|
||||
UMCCH_UMC_CONFIG_ADDR0,
|
||||
UMCCH_UMC_CONFIG_ADDR1,
|
||||
UMCCH_UMC_CONFIG_ADDR2,
|
||||
UMCCH_UMC_CONFIG_ADDR3,
|
||||
UMCCH_UMC_CONFIG_ADDR4,
|
||||
UMCCH_UMC_CONFIG_ADDR5,
|
||||
UMCCH_UMC_CONFIG_ADDR6,
|
||||
UMCCH_UMC_CONFIG_ADDR7,
|
||||
UMCCH_UMC_CONFIG_ADDR8,
|
||||
UMCCH_UMC_CONFIG_ADDR9,
|
||||
UMCCH_UMC_CONFIG_ADDR10,
|
||||
UMCCH_UMC_CONFIG_ADDR11,
|
||||
UMCCH_UMC_CONFIG_ADDR12,
|
||||
UMCCH_UMC_CONFIG_ADDR13,
|
||||
UMCCH_UMC_CONFIG_ADDR14,
|
||||
UMCCH_UMC_CONFIG_ADDR15,
|
||||
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
|
||||
(0x000143e0 + 0x00000000),
|
||||
(0x000143e0 + 0x00000800),
|
||||
(0x000143e0 + 0x00001000),
|
||||
(0x000143e0 + 0x00001800),
|
||||
(0x000543e0 + 0x00000000),
|
||||
(0x000543e0 + 0x00000800),
|
||||
(0x000543e0 + 0x00001000),
|
||||
(0x000543e0 + 0x00001800),
|
||||
(0x000943e0 + 0x00000000),
|
||||
(0x000943e0 + 0x00000800),
|
||||
(0x000943e0 + 0x00001000),
|
||||
(0x000943e0 + 0x00001800),
|
||||
(0x000d43e0 + 0x00000000),
|
||||
(0x000d43e0 + 0x00000800),
|
||||
(0x000d43e0 + 0x00001000),
|
||||
(0x000d43e0 + 0x00001800),
|
||||
(0x001143e0 + 0x00000000),
|
||||
(0x001143e0 + 0x00000800),
|
||||
(0x001143e0 + 0x00001000),
|
||||
(0x001143e0 + 0x00001800),
|
||||
(0x001543e0 + 0x00000000),
|
||||
(0x001543e0 + 0x00000800),
|
||||
(0x001543e0 + 0x00001000),
|
||||
(0x001543e0 + 0x00001800),
|
||||
(0x001943e0 + 0x00000000),
|
||||
(0x001943e0 + 0x00000800),
|
||||
(0x001943e0 + 0x00001000),
|
||||
(0x001943e0 + 0x00001800),
|
||||
(0x001d43e0 + 0x00000000),
|
||||
(0x001d43e0 + 0x00000800),
|
||||
(0x001d43e0 + 0x00001000),
|
||||
(0x001d43e0 + 0x00001800),
|
||||
};
|
||||
|
||||
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
|
||||
UMCCH_ECCCTRL_ADDR0,
|
||||
UMCCH_ECCCTRL_ADDR1,
|
||||
UMCCH_ECCCTRL_ADDR2,
|
||||
UMCCH_ECCCTRL_ADDR3,
|
||||
UMCCH_ECCCTRL_ADDR4,
|
||||
UMCCH_ECCCTRL_ADDR5,
|
||||
UMCCH_ECCCTRL_ADDR6,
|
||||
UMCCH_ECCCTRL_ADDR7,
|
||||
UMCCH_ECCCTRL_ADDR8,
|
||||
UMCCH_ECCCTRL_ADDR9,
|
||||
UMCCH_ECCCTRL_ADDR10,
|
||||
UMCCH_ECCCTRL_ADDR11,
|
||||
UMCCH_ECCCTRL_ADDR12,
|
||||
UMCCH_ECCCTRL_ADDR13,
|
||||
UMCCH_ECCCTRL_ADDR14,
|
||||
UMCCH_ECCCTRL_ADDR15,
|
||||
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
|
||||
(0x000143c2 + 0x00000000),
|
||||
(0x000143c2 + 0x00000800),
|
||||
(0x000143c2 + 0x00001000),
|
||||
(0x000143c2 + 0x00001800),
|
||||
(0x000543c2 + 0x00000000),
|
||||
(0x000543c2 + 0x00000800),
|
||||
(0x000543c2 + 0x00001000),
|
||||
(0x000543c2 + 0x00001800),
|
||||
(0x000943c2 + 0x00000000),
|
||||
(0x000943c2 + 0x00000800),
|
||||
(0x000943c2 + 0x00001000),
|
||||
(0x000943c2 + 0x00001800),
|
||||
(0x000d43c2 + 0x00000000),
|
||||
(0x000d43c2 + 0x00000800),
|
||||
(0x000d43c2 + 0x00001000),
|
||||
(0x000d43c2 + 0x00001800),
|
||||
(0x001143c2 + 0x00000000),
|
||||
(0x001143c2 + 0x00000800),
|
||||
(0x001143c2 + 0x00001000),
|
||||
(0x001143c2 + 0x00001800),
|
||||
(0x001543c2 + 0x00000000),
|
||||
(0x001543c2 + 0x00000800),
|
||||
(0x001543c2 + 0x00001000),
|
||||
(0x001543c2 + 0x00001800),
|
||||
(0x001943c2 + 0x00000000),
|
||||
(0x001943c2 + 0x00000800),
|
||||
(0x001943c2 + 0x00001000),
|
||||
(0x001943c2 + 0x00001800),
|
||||
(0x001d43c2 + 0x00000000),
|
||||
(0x001d43c2 + 0x00000800),
|
||||
(0x001d43c2 + 0x00001000),
|
||||
(0x001d43c2 + 0x00001800),
|
||||
};
|
||||
|
||||
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *src,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
u32 bits, i, tmp, reg;
|
||||
|
||||
bits = 0x7f;
|
||||
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
|
||||
reg = ecc_umc_mcumc_ctrl_addrs[i];
|
||||
tmp = RREG32(reg);
|
||||
tmp &= ~bits;
|
||||
WREG32(reg, tmp);
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
|
||||
reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
|
||||
tmp = RREG32(reg);
|
||||
tmp &= ~bits;
|
||||
WREG32(reg, tmp);
|
||||
}
|
||||
break;
|
||||
case AMDGPU_IRQ_STATE_ENABLE:
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
|
||||
reg = ecc_umc_mcumc_ctrl_addrs[i];
|
||||
tmp = RREG32(reg);
|
||||
tmp |= bits;
|
||||
WREG32(reg, tmp);
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
|
||||
reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
|
||||
tmp = RREG32(reg);
|
||||
tmp |= bits;
|
||||
WREG32(reg, tmp);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
|
||||
amdgpu_ras_reset_gpu(adev, 0);
|
||||
return AMDGPU_RAS_UE;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
struct ras_common_if *ras_if = adev->gmc.ras_if;
|
||||
struct ras_dispatch_if ih_data = {
|
||||
.entry = entry,
|
||||
};
|
||||
|
||||
if (!ras_if)
|
||||
return 0;
|
||||
|
||||
ih_data.head = *ras_if;
|
||||
|
||||
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *src,
|
||||
unsigned type,
|
||||
@@ -244,62 +307,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vega10_ih_prescreen_iv - prescreen an interrupt vector
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
*
|
||||
* Returns true if the interrupt vector should be further processed.
|
||||
*/
|
||||
static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry,
|
||||
uint64_t addr)
|
||||
{
|
||||
struct amdgpu_vm *vm;
|
||||
u64 key;
|
||||
int r;
|
||||
|
||||
/* No PASID, can't identify faulting process */
|
||||
if (!entry->pasid)
|
||||
return true;
|
||||
|
||||
/* Not a retry fault */
|
||||
if (!(entry->src_data[1] & 0x80))
|
||||
return true;
|
||||
|
||||
/* Track retry faults in per-VM fault FIFO. */
|
||||
spin_lock(&adev->vm_manager.pasid_lock);
|
||||
vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
|
||||
if (!vm) {
|
||||
/* VM not found, process it normally */
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
key = AMDGPU_VM_FAULT(entry->pasid, addr);
|
||||
r = amdgpu_vm_add_fault(vm->fault_hash, key);
|
||||
|
||||
/* Hash table is full or the fault is already being processed,
|
||||
* ignore further page faults
|
||||
*/
|
||||
if (r != 0) {
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
return false;
|
||||
}
|
||||
/* No locking required with single writer and single reader */
|
||||
r = kfifo_put(&vm->faults, key);
|
||||
if (!r) {
|
||||
/* FIFO is full. Ignore it until there is space */
|
||||
amdgpu_vm_clear_fault(vm->fault_hash, key);
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
spin_unlock(&adev->vm_manager.pasid_lock);
|
||||
/* It's the first fault for this address, process it normally */
|
||||
return true;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
@@ -312,9 +319,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
|
||||
addr = (u64)entry->src_data[0] << 12;
|
||||
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
|
||||
|
||||
if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
|
||||
if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
|
||||
entry->timestamp))
|
||||
return 1; /* This also prevents sending it to KFD */
|
||||
|
||||
/* If it's the first fault for this address, process it normally */
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
status = RREG32(hub->vm_l2_pro_fault_status);
|
||||
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
|
||||
@@ -350,10 +359,19 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
|
||||
.process = gmc_v9_0_process_interrupt,
|
||||
};
|
||||
|
||||
|
||||
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
|
||||
.set = gmc_v9_0_ecc_interrupt_state,
|
||||
.process = gmc_v9_0_process_ecc_irq,
|
||||
};
|
||||
|
||||
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->gmc.vm_fault.num_types = 1;
|
||||
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
|
||||
|
||||
adev->gmc.ecc_irq.num_types = 1;
|
||||
adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
|
||||
}
|
||||
|
||||
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
|
||||
@@ -466,64 +484,37 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
|
||||
amdgpu_ring_emit_wreg(ring, reg, pasid);
|
||||
}
|
||||
|
||||
/**
|
||||
* gmc_v9_0_set_pte_pde - update the page tables using MMIO
|
||||
/*
|
||||
* PTE format on VEGA 10:
|
||||
* 63:59 reserved
|
||||
* 58:57 mtype
|
||||
* 56 F
|
||||
* 55 L
|
||||
* 54 P
|
||||
* 53 SW
|
||||
* 52 T
|
||||
* 50:48 reserved
|
||||
* 47:12 4k physical page base address
|
||||
* 11:7 fragment
|
||||
* 6 write
|
||||
* 5 read
|
||||
* 4 exe
|
||||
* 3 Z
|
||||
* 2 snooped
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*
|
||||
* @adev: amdgpu_device pointer
|
||||
* @cpu_pt_addr: cpu address of the page table
|
||||
* @gpu_page_idx: entry in the page table to update
|
||||
* @addr: dst addr to write into pte/pde
|
||||
* @flags: access flags
|
||||
*
|
||||
* Update the page tables using the CPU.
|
||||
* PDE format on VEGA 10:
|
||||
* 63:59 block fragment size
|
||||
* 58:55 reserved
|
||||
* 54 P
|
||||
* 53:48 reserved
|
||||
* 47:6 physical base address of PD or PTE
|
||||
* 5:3 reserved
|
||||
* 2 C
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*/
|
||||
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
|
||||
uint32_t gpu_page_idx, uint64_t addr,
|
||||
uint64_t flags)
|
||||
{
|
||||
void __iomem *ptr = (void *)cpu_pt_addr;
|
||||
uint64_t value;
|
||||
|
||||
/*
|
||||
* PTE format on VEGA 10:
|
||||
* 63:59 reserved
|
||||
* 58:57 mtype
|
||||
* 56 F
|
||||
* 55 L
|
||||
* 54 P
|
||||
* 53 SW
|
||||
* 52 T
|
||||
* 50:48 reserved
|
||||
* 47:12 4k physical page base address
|
||||
* 11:7 fragment
|
||||
* 6 write
|
||||
* 5 read
|
||||
* 4 exe
|
||||
* 3 Z
|
||||
* 2 snooped
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*
|
||||
* PDE format on VEGA 10:
|
||||
* 63:59 block fragment size
|
||||
* 58:55 reserved
|
||||
* 54 P
|
||||
* 53:48 reserved
|
||||
* 47:6 physical base address of PD or PTE
|
||||
* 5:3 reserved
|
||||
* 2 C
|
||||
* 1 system
|
||||
* 0 valid
|
||||
*/
|
||||
|
||||
/*
|
||||
* The following is for PTE only. GART does not have PDEs.
|
||||
*/
|
||||
value = addr & 0x0000FFFFFFFFF000ULL;
|
||||
value |= flags;
|
||||
writeq(value, ptr + (gpu_page_idx * 8));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
|
||||
uint32_t flags)
|
||||
@@ -593,7 +584,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
|
||||
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
|
||||
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
|
||||
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
|
||||
.set_pte_pde = gmc_v9_0_set_pte_pde,
|
||||
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
|
||||
.get_vm_pde = gmc_v9_0_get_vm_pde
|
||||
};
|
||||
@@ -620,85 +610,6 @@ static int gmc_v9_0_early_init(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t reg_val;
|
||||
uint32_t reg_addr;
|
||||
uint32_t field_val;
|
||||
size_t i;
|
||||
uint32_t fv2;
|
||||
size_t lost_sheep;
|
||||
|
||||
DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
|
||||
|
||||
lost_sheep = 0;
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
|
||||
reg_addr = ecc_umclocalcap_addrs[i];
|
||||
DRM_DEBUG("ecc: "
|
||||
"UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
|
||||
i, reg_addr);
|
||||
reg_val = RREG32(reg_addr);
|
||||
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
|
||||
EccDis);
|
||||
DRM_DEBUG("ecc: "
|
||||
"reg_val: 0x%08x, "
|
||||
"EccDis: 0x%08x, ",
|
||||
reg_val, field_val);
|
||||
if (field_val) {
|
||||
DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
|
||||
++lost_sheep;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
|
||||
reg_addr = ecc_umcch_umc_config_addrs[i];
|
||||
DRM_DEBUG("ecc: "
|
||||
"UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
|
||||
i, reg_addr);
|
||||
reg_val = RREG32(reg_addr);
|
||||
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
|
||||
DramReady);
|
||||
DRM_DEBUG("ecc: "
|
||||
"reg_val: 0x%08x, "
|
||||
"DramReady: 0x%08x\n",
|
||||
reg_val, field_val);
|
||||
|
||||
if (!field_val) {
|
||||
DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
|
||||
++lost_sheep;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
|
||||
reg_addr = ecc_umcch_eccctrl_addrs[i];
|
||||
DRM_DEBUG("ecc: "
|
||||
"UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
|
||||
i, reg_addr);
|
||||
reg_val = RREG32(reg_addr);
|
||||
field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
|
||||
WrEccEn);
|
||||
fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
|
||||
RdEccEn);
|
||||
DRM_DEBUG("ecc: "
|
||||
"reg_val: 0x%08x, "
|
||||
"WrEccEn: 0x%08x, "
|
||||
"RdEccEn: 0x%08x\n",
|
||||
reg_val, field_val, fv2);
|
||||
|
||||
if (!field_val) {
|
||||
DRM_DEBUG("ecc: WrEccEn is not set\n");
|
||||
++lost_sheep;
|
||||
}
|
||||
if (!fv2) {
|
||||
DRM_DEBUG("ecc: RdEccEn is not set\n");
|
||||
++lost_sheep;
|
||||
}
|
||||
}
|
||||
|
||||
DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
|
||||
return lost_sheep == 0;
|
||||
}
|
||||
|
||||
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
|
||||
{
|
||||
|
||||
@@ -751,10 +662,82 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_ecc_late_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct ras_common_if **ras_if = &adev->gmc.ras_if;
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = gmc_v9_0_process_ras_data_cb,
|
||||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "umc_err_count",
|
||||
.debugfs_name = "umc_err_inject",
|
||||
};
|
||||
struct ras_common_if ras_block = {
|
||||
.block = AMDGPU_RAS_BLOCK__UMC,
|
||||
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
|
||||
.sub_block_index = 0,
|
||||
.name = "umc",
|
||||
};
|
||||
int r;
|
||||
|
||||
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
|
||||
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
|
||||
return 0;
|
||||
}
|
||||
/* handle resume path. */
|
||||
if (*ras_if)
|
||||
goto resume;
|
||||
|
||||
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
|
||||
if (!*ras_if)
|
||||
return -ENOMEM;
|
||||
|
||||
**ras_if = ras_block;
|
||||
|
||||
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
|
||||
if (r)
|
||||
goto feature;
|
||||
|
||||
ih_info.head = **ras_if;
|
||||
fs_info.head = **ras_if;
|
||||
|
||||
r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
|
||||
if (r)
|
||||
goto interrupt;
|
||||
|
||||
r = amdgpu_ras_debugfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto debugfs;
|
||||
|
||||
r = amdgpu_ras_sysfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto sysfs;
|
||||
resume:
|
||||
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
|
||||
if (r)
|
||||
goto irq;
|
||||
|
||||
return 0;
|
||||
irq:
|
||||
amdgpu_ras_sysfs_remove(adev, *ras_if);
|
||||
sysfs:
|
||||
amdgpu_ras_debugfs_remove(adev, *ras_if);
|
||||
debugfs:
|
||||
amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
|
||||
interrupt:
|
||||
amdgpu_ras_feature_enable(adev, *ras_if, 0);
|
||||
feature:
|
||||
kfree(*ras_if);
|
||||
*ras_if = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;
	bool r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);
@@ -762,20 +745,36 @@ static int gmc_v9_0_late_init(void *handle)
	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

@@ -787,7 +786,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
@@ -987,6 +986,12 @@ static int gmc_v9_0_sw_init(void *handle)
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
@@ -1011,7 +1016,7 @@ static int gmc_v9_0_sw_init(void *handle)
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
	adev->need_swiotlb = drm_need_swiotlb(dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1052,6 +1057,22 @@ static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/*remove fs first*/
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/*remove the IH*/
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

@@ -1198,6 +1219,7 @@ static int gmc_v9_0_hw_fini(void *handle)
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

@@ -2824,7 +2824,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
		pi->caps_tcp_ramping = true;
	}

	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

@@ -163,7 +163,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
|
||||
/* XXX for emulation, Refer to closed source code.*/
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
|
||||
0);
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
|
||||
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
|
||||
@@ -256,7 +256,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
|
||||
block_size);
|
||||
/* Send no-retry XNACK on fault to suppress VM fault storm. */
|
||||
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
|
||||
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
|
||||
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
|
||||
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
|
||||
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
|
||||
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
|
||||
|
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
|
||||
xgpu_ai_mailbox_set_valid(adev, false);
|
||||
}
|
||||
|
||||
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
|
||||
{
|
||||
int r = 0;
|
||||
u32 req, val, size;
|
||||
|
||||
if (!amdgim_is_hwperf(adev) || buf == NULL)
|
||||
return -EBADRQC;
|
||||
|
||||
switch(type) {
|
||||
case PP_SCLK:
|
||||
req = IDH_IRQ_GET_PP_SCLK;
|
||||
break;
|
||||
case PP_MCLK:
|
||||
req = IDH_IRQ_GET_PP_MCLK;
|
||||
break;
|
||||
default:
|
||||
return -EBADRQC;
|
||||
}
|
||||
|
||||
mutex_lock(&adev->virt.dpm_mutex);
|
||||
|
||||
xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
|
||||
|
||||
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
|
||||
if (!r && adev->fw_vram_usage.va != NULL) {
|
||||
val = RREG32_NO_KIQ(
|
||||
SOC15_REG_OFFSET(NBIO, 0,
|
||||
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
|
||||
size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
|
||||
val), PAGE_SIZE);
|
||||
|
||||
if (size < PAGE_SIZE)
|
||||
strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
|
||||
else
|
||||
size = 0;
|
||||
|
||||
r = size;
|
||||
goto out;
|
||||
}
|
||||
|
||||
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
|
||||
if(r)
|
||||
pr_info("%s DPM request failed",
|
||||
(type == PP_SCLK)? "SCLK" : "MCLK");
|
||||
|
||||
out:
|
||||
mutex_unlock(&adev->virt.dpm_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
|
||||
{
|
||||
int r = 0;
|
||||
u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
|
||||
|
||||
if (!amdgim_is_hwperf(adev))
|
||||
return -EBADRQC;
|
||||
|
||||
mutex_lock(&adev->virt.dpm_mutex);
|
||||
xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
|
||||
|
||||
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
|
||||
if (!r)
|
||||
goto out;
|
||||
|
||||
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
|
||||
if (!r)
|
||||
pr_info("DPM request failed");
|
||||
else
|
||||
pr_info("Mailbox is broken");
|
||||
|
||||
out:
|
||||
mutex_unlock(&adev->virt.dpm_mutex);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
|
||||
enum idh_request req)
|
||||
{
|
||||
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
|
||||
.reset_gpu = xgpu_ai_request_reset,
|
||||
.wait_reset = NULL,
|
||||
.trans_msg = xgpu_ai_mailbox_trans_msg,
|
||||
.get_pp_clk = xgpu_ai_get_pp_clk,
|
||||
.force_dpm_level = xgpu_ai_force_dpm_level,
|
||||
};
|
||||
|
@@ -35,6 +35,10 @@ enum idh_request {
|
||||
IDH_REL_GPU_FINI_ACCESS,
|
||||
IDH_REQ_GPU_RESET_ACCESS,
|
||||
|
||||
IDH_IRQ_FORCE_DPM_LEVEL = 10,
|
||||
IDH_IRQ_GET_PP_SCLK,
|
||||
IDH_IRQ_GET_PP_MCLK,
|
||||
|
||||
IDH_LOG_VF_ERROR = 200,
|
||||
};
|
||||
|
||||
@@ -43,6 +47,8 @@ enum idh_event {
|
||||
IDH_READY_TO_ACCESS_GPU,
|
||||
IDH_FLR_NOTIFICATION,
|
||||
IDH_FLR_NOTIFICATION_CMPL,
|
||||
IDH_SUCCESS,
|
||||
IDH_FAIL,
|
||||
IDH_EVENT_MAX
|
||||
};
|
||||
|
||||
|
@@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)

	/* wait until RCV_MSG become 3 */
	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to recieve FLR_CMPL\n");
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

@@ -118,7 +118,8 @@ static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE, 6);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

@@ -43,6 +43,7 @@ enum psp_gfx_crtl_cmd_id
	GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
	GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
	GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
	GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
	GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
	GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */

@@ -33,6 +33,9 @@
|
||||
#include "sdma0/sdma0_4_0_offset.h"
|
||||
#include "nbio/nbio_7_4_offset.h"
|
||||
|
||||
#include "oss/osssys_4_0_offset.h"
|
||||
#include "oss/osssys_4_0_sh_mask.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
|
||||
@@ -113,6 +116,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
|
||||
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
|
||||
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
|
||||
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
|
||||
|
||||
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
|
||||
|
||||
adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
|
||||
adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
|
||||
adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
|
||||
le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -217,6 +227,37 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void psp_v11_0_reroute_ih(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t tmp;
|
||||
|
||||
/* Change IH ring for VMC */
|
||||
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
|
||||
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
|
||||
|
||||
mdelay(20);
|
||||
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
|
||||
0x80000000, 0x8000FFFF, false);
|
||||
|
||||
/* Change IH ring for UMC */
|
||||
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
|
||||
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
|
||||
|
||||
mdelay(20);
|
||||
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
|
||||
0x80000000, 0x8000FFFF, false);
|
||||
}
|
||||
|
||||
static int psp_v11_0_ring_init(struct psp_context *psp,
|
||||
enum psp_ring_type ring_type)
|
||||
{
|
||||
@@ -224,6 +265,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
|
||||
struct psp_ring *ring;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
|
||||
psp_v11_0_reroute_ih(psp);
|
||||
|
||||
ring = &psp->km_ring;
|
||||
|
||||
ring->ring_type = ring_type;
|
||||
@@ -631,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
|
||||
for (i = 0; i < topology_info_input->num_nodes; i++) {
|
||||
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
|
||||
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
|
||||
topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
|
||||
topology_info_input->nodes[i].is_sharing_enabled = 1;
|
||||
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
|
||||
}
|
||||
|
||||
@@ -679,6 +722,54 @@ static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
|
||||
struct ta_ras_trigger_error_input *info)
|
||||
{
|
||||
struct ta_ras_shared_memory *ras_cmd;
|
||||
int ret;
|
||||
|
||||
if (!psp->ras.ras_initialized)
|
||||
return -EINVAL;
|
||||
|
||||
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
|
||||
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
|
||||
|
||||
ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
|
||||
ras_cmd->ras_in_message.trigger_error = *info;
|
||||
|
||||
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
return ras_cmd->ras_status;
|
||||
}
|
||||
|
||||
static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
|
||||
{
|
||||
#if 0
|
||||
// not support yet.
|
||||
struct ta_ras_shared_memory *ras_cmd;
|
||||
int ret;
|
||||
|
||||
if (!psp->ras.ras_initialized)
|
||||
return -EINVAL;
|
||||
|
||||
ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
|
||||
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
|
||||
|
||||
ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
|
||||
ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
|
||||
|
||||
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
return ras_cmd->ras_status;
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct psp_funcs psp_v11_0_funcs = {
|
||||
.init_microcode = psp_v11_0_init_microcode,
|
||||
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
|
||||
@@ -695,6 +786,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
|
||||
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
|
||||
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
|
||||
.support_vmr_ring = psp_v11_0_support_vmr_ring,
|
||||
.ras_trigger_error = psp_v11_0_ras_trigger_error,
|
||||
.ras_cure_posion = psp_v11_0_ras_cure_posion,
|
||||
};
|
||||
|
||||
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
|
||||
|
@@ -37,6 +37,9 @@
|
||||
#include "sdma0/sdma0_4_0_offset.h"
|
||||
#include "nbio/nbio_6_1_offset.h"
|
||||
|
||||
#include "oss/osssys_4_0_offset.h"
|
||||
#include "oss/osssys_4_0_sh_mask.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
|
||||
@@ -252,6 +255,37 @@ static int psp_v3_1_ring_init(struct psp_context *psp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void psp_v3_1_reroute_ih(struct psp_context *psp)
|
||||
{
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
uint32_t tmp;
|
||||
|
||||
/* Change IH ring for VMC */
|
||||
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
|
||||
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
|
||||
|
||||
mdelay(20);
|
||||
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
|
||||
0x80000000, 0x8000FFFF, false);
|
||||
|
||||
/* Change IH ring for UMC */
|
||||
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
|
||||
tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
|
||||
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
|
||||
|
||||
mdelay(20);
|
||||
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
|
||||
0x80000000, 0x8000FFFF, false);
|
||||
}
|
||||
|
||||
static int psp_v3_1_ring_create(struct psp_context *psp,
|
||||
enum psp_ring_type ring_type)
|
||||
{
|
||||
@@ -260,6 +294,8 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
|
||||
struct psp_ring *ring = &psp->km_ring;
|
||||
struct amdgpu_device *adev = psp->adev;
|
||||
|
||||
psp_v3_1_reroute_ih(psp);
|
||||
|
||||
/* Write low address of the ring to C2PMSG_69 */
|
||||
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
|
||||
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
|
||||
|
@@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle)
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 :
|
||||
AMDGPU_SDMA_IRQ_TRAP1);
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
u32 sdma_cntl;
|
||||
|
||||
switch (type) {
|
||||
case AMDGPU_SDMA_IRQ_TRAP0:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE0:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
|
||||
@@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_SDMA_IRQ_TRAP1:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE1:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
|
||||
|
@@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle)
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 :
|
||||
AMDGPU_SDMA_IRQ_TRAP1);
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
u32 sdma_cntl;
|
||||
|
||||
switch (type) {
|
||||
case AMDGPU_SDMA_IRQ_TRAP0:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE0:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
|
||||
@@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_SDMA_IRQ_TRAP1:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE1:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
|
||||
|
@@ -41,6 +41,8 @@
|
||||
#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
|
||||
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
|
||||
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
|
||||
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
|
||||
@@ -154,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
|
||||
@@ -184,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
|
||||
@@ -849,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
|
||||
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
|
||||
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
|
||||
SDMA0_GFX_RB_WPTR_POLL_CNTL,
|
||||
F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
|
||||
F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
|
||||
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
|
||||
|
||||
/* enable DMA RB */
|
||||
@@ -940,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
|
||||
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
|
||||
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
|
||||
SDMA0_PAGE_RB_WPTR_POLL_CNTL,
|
||||
F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
|
||||
F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
|
||||
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
|
||||
|
||||
/* enable DMA RB */
|
||||
@@ -1493,6 +1493,87 @@ static int sdma_v4_0_early_init(void *handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry);
|
||||
|
||||
static int sdma_v4_0_late_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct ras_common_if **ras_if = &adev->sdma.ras_if;
|
||||
struct ras_ih_if ih_info = {
|
||||
.cb = sdma_v4_0_process_ras_data_cb,
|
||||
};
|
||||
struct ras_fs_if fs_info = {
|
||||
.sysfs_name = "sdma_err_count",
|
||||
.debugfs_name = "sdma_err_inject",
|
||||
};
|
||||
struct ras_common_if ras_block = {
|
||||
.block = AMDGPU_RAS_BLOCK__SDMA,
|
||||
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
|
||||
.sub_block_index = 0,
|
||||
.name = "sdma",
|
||||
};
|
||||
int r;
|
||||
|
||||
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
|
||||
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* handle resume path. */
|
||||
if (*ras_if)
|
||||
goto resume;
|
||||
|
||||
*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
|
||||
if (!*ras_if)
|
||||
return -ENOMEM;
|
||||
|
||||
**ras_if = ras_block;
|
||||
|
||||
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
|
||||
if (r)
|
||||
goto feature;
|
||||
|
||||
ih_info.head = **ras_if;
|
||||
fs_info.head = **ras_if;
|
||||
|
||||
r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
|
||||
if (r)
|
||||
goto interrupt;
|
||||
|
||||
r = amdgpu_ras_debugfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto debugfs;
|
||||
|
||||
r = amdgpu_ras_sysfs_create(adev, &fs_info);
|
||||
if (r)
|
||||
goto sysfs;
|
||||
resume:
|
||||
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
|
||||
if (r)
|
||||
goto irq;
|
||||
|
||||
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r) {
|
||||
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
|
||||
goto irq;
|
||||
}
|
||||
|
||||
return 0;
|
||||
irq:
|
||||
amdgpu_ras_sysfs_remove(adev, *ras_if);
|
||||
sysfs:
|
||||
amdgpu_ras_debugfs_remove(adev, *ras_if);
|
||||
debugfs:
|
||||
amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
|
||||
interrupt:
|
||||
amdgpu_ras_feature_enable(adev, *ras_if, 0);
|
||||
feature:
|
||||
kfree(*ras_if);
|
||||
*ras_if = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
@@ -1511,6 +1592,18 @@ static int sdma_v4_0_sw_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA SRAM ECC event */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
|
||||
&adev->sdma.ecc_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA SRAM ECC event */
|
||||
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
|
||||
&adev->sdma.ecc_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
ring = &adev->sdma.instance[i].ring;
|
||||
ring->ring_obj = NULL;
|
||||
@@ -1526,8 +1619,8 @@ static int sdma_v4_0_sw_init(void *handle)
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 :
|
||||
AMDGPU_SDMA_IRQ_TRAP1);
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@@ -1546,8 +1639,8 @@ static int sdma_v4_0_sw_init(void *handle)
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 :
|
||||
AMDGPU_SDMA_IRQ_TRAP1);
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -1561,6 +1654,22 @@ static int sdma_v4_0_sw_fini(void *handle)
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i;
|
||||
|
||||
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
|
||||
adev->sdma.ras_if) {
|
||||
struct ras_common_if *ras_if = adev->sdma.ras_if;
|
||||
struct ras_ih_if ih_info = {
|
||||
.head = *ras_if,
|
||||
};
|
||||
|
||||
/*remove fs first*/
|
||||
amdgpu_ras_debugfs_remove(adev, ras_if);
|
||||
amdgpu_ras_sysfs_remove(adev, ras_if);
|
||||
/*remove the IH*/
|
||||
amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
|
||||
amdgpu_ras_feature_enable(adev, ras_if, 0);
|
||||
kfree(ras_if);
|
||||
}
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||
if (adev->sdma.has_page_queue)
|
||||
@@ -1598,6 +1707,9 @@ static int sdma_v4_0_hw_fini(void *handle)
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return 0;
|
||||
|
||||
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
|
||||
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
|
||||
sdma_v4_0_ctx_switch_enable(adev, false);
|
||||
sdma_v4_0_enable(adev, false);
|
||||
|
||||
@@ -1666,13 +1778,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
|
||||
u32 sdma_cntl;
|
||||
|
||||
sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
|
||||
sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
|
||||
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
|
||||
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
|
||||
WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
|
||||
WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1714,6 +1825,58 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
uint32_t instance, err_source;
|
||||
|
||||
switch (entry->client_id) {
|
||||
case SOC15_IH_CLIENTID_SDMA0:
|
||||
instance = 0;
|
||||
break;
|
||||
case SOC15_IH_CLIENTID_SDMA1:
|
||||
instance = 1;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (entry->src_id) {
|
||||
case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
|
||||
err_source = 0;
|
||||
break;
|
||||
case SDMA0_4_0__SRCID__SDMA_ECC:
|
||||
err_source = 1;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
|
||||
|
||||
amdgpu_ras_reset_gpu(adev, 0);
|
||||
|
||||
return AMDGPU_RAS_UE;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
{
|
||||
struct ras_common_if *ras_if = adev->sdma.ras_if;
|
||||
struct ras_dispatch_if ih_data = {
|
||||
.entry = entry,
|
||||
};
|
||||
|
||||
if (!ras_if)
|
||||
return 0;
|
||||
|
||||
ih_data.head = *ras_if;
|
||||
|
||||
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
struct amdgpu_iv_entry *entry)
|
||||
@@ -1741,6 +1904,25 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
|
||||
struct amdgpu_irq_src *source,
|
||||
unsigned type,
|
||||
enum amdgpu_interrupt_state state)
|
||||
{
|
||||
u32 sdma_edc_config;
|
||||
|
||||
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
|
||||
sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
|
||||
sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
|
||||
|
||||
sdma_edc_config = RREG32(reg_offset);
|
||||
sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
|
||||
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
|
||||
WREG32(reg_offset, sdma_edc_config);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sdma_v4_0_update_medium_grain_clock_gating(
|
||||
struct amdgpu_device *adev,
|
||||
bool enable)
|
||||
@@ -1906,7 +2088,7 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
|
||||
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
|
||||
.name = "sdma_v4_0",
|
||||
.early_init = sdma_v4_0_early_init,
|
||||
.late_init = NULL,
|
||||
.late_init = sdma_v4_0_late_init,
|
||||
.sw_init = sdma_v4_0_sw_init,
|
||||
.sw_fini = sdma_v4_0_sw_fini,
|
||||
.hw_init = sdma_v4_0_hw_init,
|
||||
@@ -2008,11 +2190,20 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
|
||||
.process = sdma_v4_0_process_illegal_inst_irq,
|
||||
};
|
||||
|
||||
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
|
||||
.set = sdma_v4_0_set_ecc_irq_state,
|
||||
.process = sdma_v4_0_process_ecc_irq,
|
||||
};
|
||||
|
||||
|
||||
|
||||
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
|
||||
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
|
||||
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
|
||||
adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
|
||||
adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2077,8 +2268,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
|
||||
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
|
||||
if (adev->sdma.has_page_queue)
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
|
||||
if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
|
||||
else
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
@@ -2097,15 +2288,22 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
||||
unsigned i;
|
||||
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
if (adev->sdma.has_page_queue)
|
||||
if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
|
||||
for (i = 1; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].page.sched;
|
||||
else
|
||||
adev->vm_manager.vm_pte_rqs[i - 1] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
|
||||
adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
|
||||
} else {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
|
||||
|
@@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle)
|
||||
r = amdgpu_ring_init(adev, ring, 1024,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 :
|
||||
AMDGPU_SDMA_IRQ_TRAP1);
|
||||
AMDGPU_SDMA_IRQ_INSTANCE0 :
|
||||
AMDGPU_SDMA_IRQ_INSTANCE1);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
u32 sdma_cntl;
|
||||
|
||||
switch (type) {
|
||||
case AMDGPU_SDMA_IRQ_TRAP0:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE0:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
|
||||
@@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case AMDGPU_SDMA_IRQ_TRAP1:
|
||||
case AMDGPU_SDMA_IRQ_INSTANCE1:
|
||||
switch (state) {
|
||||
case AMDGPU_IRQ_STATE_DISABLE:
|
||||
sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
|
||||
|
@@ -4098,14 +4098,13 @@ static int si_notify_smc_display_change(struct amdgpu_device *adev,

static void si_program_response_times(struct amdgpu_device *adev)
{
	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, acpi_dly, vbi_dly;
	u32 reference_clock;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
	backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;

	if (voltage_response_time == 0)
		voltage_response_time = 1000;

@@ -63,6 +63,7 @@
|
||||
#include "vcn_v1_0.h"
|
||||
#include "dce_virtual.h"
|
||||
#include "mxgpu_ai.h"
|
||||
#include "amdgpu_smu.h"
|
||||
|
||||
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
|
||||
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
|
||||
@@ -392,6 +393,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
|
||||
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 i;
|
||||
int ret = 0;
|
||||
|
||||
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
|
||||
|
||||
@@ -402,7 +404,9 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
|
||||
|
||||
pci_save_state(adev->pdev);
|
||||
|
||||
psp_gpu_reset(adev);
|
||||
ret = psp_gpu_reset(adev);
|
||||
if (ret)
|
||||
dev_err(adev->dev, "GPU mode1 reset failed\n");
|
||||
|
||||
pci_restore_state(adev->pdev);
|
||||
|
||||
@@ -417,7 +421,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
|
||||
|
||||
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
|
||||
@@ -451,6 +455,8 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
|
||||
|
||||
dev_info(adev->dev, "GPU BACO reset\n");
|
||||
|
||||
adev->in_baco_reset = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -461,8 +467,15 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
|
||||
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_VEGA10:
|
||||
case CHIP_VEGA12:
|
||||
soc15_asic_get_baco_capability(adev, &baco_reset);
|
||||
break;
|
||||
case CHIP_VEGA20:
|
||||
if (adev->psp.sos_fw_version >= 0x80067)
|
||||
soc15_asic_get_baco_capability(adev, &baco_reset);
|
||||
else
|
||||
baco_reset = false;
|
||||
break;
|
||||
default:
|
||||
baco_reset = false;
|
||||
break;
|
||||
@@ -602,8 +615,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
|
||||
}
|
||||
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
|
||||
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
if (is_support_sw_smu(adev))
|
||||
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
|
||||
else
|
||||
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
|
||||
}
|
||||
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
|
||||
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
|
||||
#if defined(CONFIG_DRM_AMD_DC)
|
||||
@@ -884,7 +901,8 @@ static int soc15_common_early_init(void *handle)
|
||||
|
||||
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
|
||||
} else if (adev->pdev->device == 0x15d8) {
|
||||
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
|
||||
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
|
||||
AMD_CG_SUPPORT_GFX_MGLS |
|
||||
AMD_CG_SUPPORT_GFX_CP_LS |
|
||||
AMD_CG_SUPPORT_GFX_3D_CGCG |
|
||||
AMD_CG_SUPPORT_GFX_3D_CGLS |
|
||||
@@ -927,7 +945,7 @@ static int soc15_common_early_init(void *handle)
|
||||
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
|
||||
}
|
||||
|
||||
if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
|
||||
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
|
||||
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
|
||||
AMD_PG_SUPPORT_CP |
|
||||
AMD_PG_SUPPORT_RLC_SMU_HS;
|
||||
|
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
/****************************************************************************\
|
||||
*
|
||||
* File Name ta_ras_if.h
|
||||
* Project AMD PSP SW IP Module
|
||||
*
|
||||
* Description Interface to the RAS Trusted Application
|
||||
*
|
||||
* Copyright 2019 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
|
||||
* and associated documentation files (the "Software"), to deal in the Software without restriction,
|
||||
* including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
* subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all copies or substantial
|
||||
* portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#ifndef _TA_RAS_IF_H
|
||||
#define _TA_RAS_IF_H
|
||||
|
||||
/* Responses have bit 31 set */
|
||||
#define RSP_ID_MASK (1U << 31)
|
||||
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
|
||||
|
||||
#define TA_NUM_BLOCK_MAX 14
|
||||
|
||||
enum ras_command {
|
||||
TA_RAS_COMMAND__ENABLE_FEATURES = 0,
|
||||
TA_RAS_COMMAND__DISABLE_FEATURES,
|
||||
TA_RAS_COMMAND__TRIGGER_ERROR,
|
||||
};
|
||||
|
||||
enum ta_ras_status {
|
||||
TA_RAS_STATUS__SUCCESS = 0x00,
|
||||
TA_RAS_STATUS__RESET_NEEDED = 0x01,
|
||||
TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
|
||||
TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
|
||||
TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
|
||||
TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05
|
||||
};
|
||||
|
||||
enum ta_ras_block {
|
||||
TA_RAS_BLOCK__UMC = 0,
|
||||
TA_RAS_BLOCK__SDMA,
|
||||
TA_RAS_BLOCK__GFX,
|
||||
TA_RAS_BLOCK__MMHUB,
|
||||
TA_RAS_BLOCK__ATHUB,
|
||||
TA_RAS_BLOCK__PCIE_BIF,
|
||||
TA_RAS_BLOCK__HDP,
|
||||
TA_RAS_BLOCK__XGMI_WAFL,
|
||||
TA_RAS_BLOCK__DF,
|
||||
TA_RAS_BLOCK__SMN,
|
||||
TA_RAS_BLOCK__SEM,
|
||||
TA_RAS_BLOCK__MP0,
|
||||
TA_RAS_BLOCK__MP1,
|
||||
TA_RAS_BLOCK__FUSE = (TA_NUM_BLOCK_MAX - 1),
|
||||
};
|
||||
|
||||
enum ta_ras_error_type {
|
||||
TA_RAS_ERROR__NONE = 0,
|
||||
TA_RAS_ERROR__PARITY = 1,
|
||||
TA_RAS_ERROR__SINGLE_CORRECTABLE = 2,
|
||||
TA_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
|
||||
TA_RAS_ERROR__POISON = 8
|
||||
};
|
||||
|
||||
struct ta_ras_enable_features_input {
|
||||
enum ta_ras_block block_id;
|
||||
enum ta_ras_error_type error_type;
|
||||
};
|
||||
|
||||
struct ta_ras_disable_features_input {
|
||||
enum ta_ras_block block_id;
|
||||
enum ta_ras_error_type error_type;
|
||||
};
|
||||
|
||||
struct ta_ras_trigger_error_input {
|
||||
enum ta_ras_block block_id;
|
||||
enum ta_ras_error_type inject_error_type;
|
||||
uint32_t sub_block_index;
|
||||
uint64_t address;
|
||||
uint64_t value;
|
||||
};
|
||||
|
||||
union ta_ras_cmd_input {
|
||||
struct ta_ras_enable_features_input enable_features;
|
||||
struct ta_ras_disable_features_input disable_features;
|
||||
struct ta_ras_trigger_error_input trigger_error;
|
||||
};
|
||||
|
||||
struct ta_ras_shared_memory {
|
||||
uint32_t cmd_id;
|
||||
uint32_t resp_id;
|
||||
enum ta_ras_status ras_status;
|
||||
uint32_t reserved;
|
||||
union ta_ras_cmd_input ras_in_message;
|
||||
};
|
||||
|
||||
#endif // TL_RAS_IF_H_
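
For illustration, a minimal sketch of how a caller fills the shared buffer defined by this header, mirroring psp_v11_0_ras_trigger_error() earlier in the series; the helper name and the standalone includes are assumptions for the example, not kernel APIs:

#include <stdint.h>
#include <string.h>
#include "ta_ras_if.h"

/* Hypothetical helper: prepare a TRIGGER_ERROR command in the RAS TA
 * shared memory; the TA answers by setting resp_id = RSP_ID(cmd_id),
 * i.e. the command id with bit 31 set. */
static void ras_fill_trigger_error(struct ta_ras_shared_memory *ras_cmd,
				   const struct ta_ras_trigger_error_input *info)
{
	memset(ras_cmd, 0, sizeof(*ras_cmd));
	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;
}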
|
@@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
	}

	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, Can't set clock gateing");
		DRM_INFO("VCE is busy, Can't set clock gating");
		return 0;
	}

@@ -382,6 +382,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
|
||||
static int vce_v4_0_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
|
||||
/* Disable VCPU */
|
||||
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
|
||||
|
||||
/* hold on ECPU */
|
||||
@@ -389,8 +390,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
|
||||
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
|
||||
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
|
||||
|
||||
/* clear BUSY flag */
|
||||
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
|
||||
/* clear VCE_STATUS */
|
||||
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
|
||||
|
||||
/* Set Clock-Gating off */
|
||||
/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
|
||||
@@ -922,6 +923,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int vce_v4_0_set_powergating_state(void *handle,
|
||||
enum amd_powergating_state state)
|
||||
@@ -935,16 +937,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
|
||||
*/
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
|
||||
return 0;
|
||||
|
||||
if (state == AMD_PG_STATE_GATE)
|
||||
/* XXX do we need a vce_v4_0_stop()? */
|
||||
return 0;
|
||||
return vce_v4_0_stop(adev);
|
||||
else
|
||||
return vce_v4_0_start(adev);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
|
||||
struct amdgpu_ib *ib, uint32_t flags)
|
||||
@@ -1059,7 +1056,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = {
|
||||
.soft_reset = NULL /* vce_v4_0_soft_reset */,
|
||||
.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
|
||||
.set_clockgating_state = vce_v4_0_set_clockgating_state,
|
||||
.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
|
||||
.set_powergating_state = vce_v4_0_set_powergating_state,
|
||||
};
|
||||
|
||||
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
|
||||
|
@@ -136,6 +136,25 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
|
||||
return ih_rb_cntl;
|
||||
}
|
||||
|
||||
static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
|
||||
{
|
||||
u32 ih_doorbell_rtpr = 0;
|
||||
|
||||
if (ih->use_doorbell) {
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR, OFFSET,
|
||||
ih->doorbell_index);
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR,
|
||||
ENABLE, 1);
|
||||
} else {
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR,
|
||||
ENABLE, 0);
|
||||
}
|
||||
return ih_doorbell_rtpr;
|
||||
}
|
||||
|
||||
/**
|
||||
* vega10_ih_irq_init - init and enable the interrupt ring
|
||||
*
|
||||
@@ -150,8 +169,8 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
|
||||
static int vega10_ih_irq_init(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ih_ring *ih;
|
||||
u32 ih_rb_cntl;
|
||||
int ret = 0;
|
||||
u32 ih_rb_cntl, ih_doorbell_rtpr;
|
||||
u32 tmp;
|
||||
|
||||
/* disable irqs */
|
||||
@@ -177,23 +196,11 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
|
||||
upper_32_bits(ih->wptr_addr) & 0xFFFF);
|
||||
|
||||
/* set rptr, wptr to 0 */
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
|
||||
|
||||
ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
|
||||
if (adev->irq.ih.use_doorbell) {
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR, OFFSET,
|
||||
adev->irq.ih.doorbell_index);
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR,
|
||||
ENABLE, 1);
|
||||
} else {
|
||||
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
|
||||
IH_DOORBELL_RPTR,
|
||||
ENABLE, 0);
|
||||
}
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
|
||||
vega10_ih_doorbell_rptr(ih));
|
||||
|
||||
ih = &adev->irq.ih1;
|
||||
if (ih->ring_size) {
|
||||
@@ -203,11 +210,18 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
|
||||
|
||||
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
|
||||
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
|
||||
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
|
||||
WPTR_OVERFLOW_ENABLE, 0);
|
||||
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
|
||||
RB_FULL_DRAIN_ENABLE, 1);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
|
||||
|
||||
/* set rptr, wptr to 0 */
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
|
||||
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
|
||||
vega10_ih_doorbell_rptr(ih));
|
||||
}
|
||||
|
||||
ih = &adev->irq.ih2;
|
||||
@@ -216,13 +230,16 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
|
||||
(ih->gpu_addr >> 40) & 0xff);
|
||||
|
||||
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
|
||||
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
|
||||
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
|
||||
|
||||
/* set rptr, wptr to 0 */
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
|
||||
|
||||
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
|
||||
vega10_ih_doorbell_rptr(ih));
|
||||
}
|
||||
|
||||
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
|
||||
@@ -449,20 +466,23 @@ static int vega10_ih_sw_init(void *handle)
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (adev->asic_type == CHIP_VEGA10) {
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* TODO add doorbell for IH1 & IH2 as well */
|
||||
adev->irq.ih.use_doorbell = true;
|
||||
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->irq.ih1.use_doorbell = true;
|
||||
adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
|
||||
|
||||
r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
adev->irq.ih2.use_doorbell = true;
|
||||
adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
|
||||
|
||||
r = amdgpu_irq_init(adev);
|
||||
|
||||
return r;
|
||||
|
@@ -467,6 +467,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
|
||||
memset(&kfd->doorbell_available_index, 0,
|
||||
sizeof(kfd->doorbell_available_index));
|
||||
|
||||
atomic_set(&kfd->sram_ecc_flag, 0);
|
||||
|
||||
return kfd;
|
||||
}
|
||||
|
||||
@@ -492,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
|
||||
{
|
||||
unsigned int size;
|
||||
|
||||
kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
|
||||
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
|
||||
KGD_ENGINE_MEC1);
|
||||
kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
|
||||
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
|
||||
KGD_ENGINE_SDMA1);
|
||||
kfd->shared_resources = *gpu_resources;
|
||||
|
||||
@@ -662,6 +664,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
|
||||
return ret;
|
||||
count = atomic_dec_return(&kfd_locked);
|
||||
WARN_ONCE(count != 0, "KFD reset ref. error");
|
||||
|
||||
atomic_set(&kfd->sram_ecc_flag, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1025,6 +1030,12 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
|
||||
{
|
||||
if (kfd)
|
||||
atomic_inc(&kfd->sram_ecc_flag);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
||||
/* This function will send a package to HIQ to hang the HWS
|
||||
|
@@ -1011,25 +1011,41 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
|
||||
void kfd_signal_reset_event(struct kfd_dev *dev)
|
||||
{
|
||||
struct kfd_hsa_hw_exception_data hw_exception_data;
|
||||
struct kfd_hsa_memory_exception_data memory_exception_data;
|
||||
struct kfd_process *p;
|
||||
struct kfd_event *ev;
|
||||
unsigned int temp;
|
||||
uint32_t id, idx;
|
||||
int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
|
||||
KFD_HW_EXCEPTION_ECC :
|
||||
KFD_HW_EXCEPTION_GPU_HANG;
|
||||
|
||||
/* Whole gpu reset caused by GPU hang and memory is lost */
|
||||
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
|
||||
hw_exception_data.gpu_id = dev->id;
|
||||
hw_exception_data.memory_lost = 1;
|
||||
hw_exception_data.reset_cause = reset_cause;
|
||||
|
||||
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
|
||||
memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
|
||||
memory_exception_data.gpu_id = dev->id;
|
||||
memory_exception_data.failure.imprecise = true;
|
||||
|
||||
idx = srcu_read_lock(&kfd_processes_srcu);
|
||||
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
|
||||
mutex_lock(&p->event_mutex);
|
||||
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
|
||||
idr_for_each_entry_continue(&p->event_idr, ev, id)
|
||||
idr_for_each_entry_continue(&p->event_idr, ev, id) {
|
||||
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
|
||||
ev->hw_exception_data = hw_exception_data;
|
||||
set_event(ev);
|
||||
}
|
||||
if (ev->type == KFD_EVENT_TYPE_MEMORY &&
|
||||
reset_cause == KFD_HW_EXCEPTION_ECC) {
|
||||
ev->memory_exception_data = memory_exception_data;
|
||||
set_event(ev);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&p->event_mutex);
|
||||
}
|
||||
srcu_read_unlock(&kfd_processes_srcu, idx);
|
||||
|
@@ -276,6 +276,9 @@ struct kfd_dev {
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;
};

enum kfd_mempool {
@@ -37,6 +37,7 @@
|
||||
#include "kfd_device_queue_manager.h"
|
||||
#include "kfd_iommu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu_ras.h"
|
||||
|
||||
/* topology_device_list - Master list of all topology devices */
|
||||
static struct list_head topology_device_list;
|
||||
@@ -1197,6 +1198,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
|
||||
void *crat_image = NULL;
|
||||
size_t image_size = 0;
|
||||
int proximity_domain;
|
||||
struct amdgpu_ras *ctx;
|
||||
|
||||
INIT_LIST_HEAD(&temp_topology_device_list);
|
||||
|
||||
@@ -1328,6 +1330,20 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
|
||||
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
|
||||
}
|
||||
|
||||
ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
|
||||
if (ctx) {
|
||||
/* kfd only concerns sram ecc on GFX/SDMA and HBM ecc on UMC */
|
||||
dev->node_props.capability |=
|
||||
(((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
|
||||
((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
|
||||
HSA_CAP_SRAM_EDCSUPPORTED : 0;
|
||||
dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
|
||||
HSA_CAP_MEM_EDCSUPPORTED : 0;
|
||||
|
||||
dev->node_props.capability |= (ctx->features != 0) ?
|
||||
HSA_CAP_RASEVENTNOTIFY : 0;
|
||||
}
|
||||
|
||||
kfd_debug_print_topology();
|
||||
|
||||
if (!res)
|
||||
|
@@ -48,6 +48,10 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000

#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
#define HSA_CAP_RASEVENTNOTIFY 0x00200000

struct kfd_node_properties {
	uint64_t hive_id;
	uint32_t cpu_cores_count;

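
A quick standalone sketch of how a consumer could test the new capability bits once it has read a node's capability word; the function name and a userspace-style compile are assumptions, only the mask values come from the header above:

#include <stdbool.h>
#include <stdint.h>

#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED  0x00100000
#define HSA_CAP_RASEVENTNOTIFY    0x00200000

/* True when a topology node reports both SRAM (GFX/SDMA) and HBM (UMC) ECC. */
static bool node_has_full_edc(uint32_t capability)
{
	return (capability & HSA_CAP_SRAM_EDCSUPPORTED) &&
	       (capability & HSA_CAP_MEM_EDCSUPPORTED);
}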
@@ -132,8 +132,6 @@ struct amdgpu_display_manager {
|
||||
*/
|
||||
struct drm_private_obj atomic_obj;
|
||||
|
||||
struct drm_modeset_lock atomic_obj_lock;
|
||||
|
||||
/**
|
||||
* @dc_lock:
|
||||
*
|
||||
@@ -184,6 +182,15 @@ struct amdgpu_display_manager {
|
||||
struct common_irq_params
|
||||
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
|
||||
|
||||
/**
|
||||
* @vupdate_params:
|
||||
*
|
||||
* Vertical update IRQ parameters, passed to registered handlers when
|
||||
* triggered.
|
||||
*/
|
||||
struct common_irq_params
|
||||
vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
|
||||
|
||||
spinlock_t irq_handler_list_table_lock;
|
||||
|
||||
struct backlight_device *backlight_dev;
|
||||
@@ -240,6 +247,10 @@ struct amdgpu_dm_connector {
|
||||
struct mutex hpd_lock;
|
||||
|
||||
bool fake_enable;
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
uint32_t debugfs_dpcd_address;
|
||||
uint32_t debugfs_dpcd_size;
|
||||
#endif
|
||||
};
|
||||
|
||||
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
|
||||
@@ -260,6 +271,9 @@ struct dm_crtc_state {
|
||||
struct drm_crtc_state base;
|
||||
struct dc_stream_state *stream;
|
||||
|
||||
int active_planes;
|
||||
bool interrupts_enabled;
|
||||
|
||||
int crc_skip_count;
|
||||
bool crc_enabled;
|
||||
|
||||
|
@@ -126,46 +126,51 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
|
||||
crtc->base.state->dev->dev_private;
|
||||
struct drm_color_lut *lut;
|
||||
uint32_t lut_size;
|
||||
struct dc_gamma *gamma;
|
||||
struct dc_gamma *gamma = NULL;
|
||||
enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
|
||||
|
||||
bool ret;
|
||||
|
||||
if (!blob) {
|
||||
if (!blob && adev->asic_type <= CHIP_RAVEN) {
|
||||
/* By default, use the SRGB predefined curve.*/
|
||||
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
|
||||
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
|
||||
return 0;
|
||||
}
|
||||
|
||||
lut = (struct drm_color_lut *)blob->data;
|
||||
lut_size = blob->length / sizeof(struct drm_color_lut);
|
||||
if (blob) {
|
||||
lut = (struct drm_color_lut *)blob->data;
|
||||
lut_size = blob->length / sizeof(struct drm_color_lut);
|
||||
|
||||
gamma = dc_create_gamma();
|
||||
if (!gamma)
|
||||
return -ENOMEM;
|
||||
gamma = dc_create_gamma();
|
||||
if (!gamma)
|
||||
return -ENOMEM;
|
||||
|
||||
gamma->num_entries = lut_size;
|
||||
if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
|
||||
gamma->type = GAMMA_RGB_256;
|
||||
else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
|
||||
gamma->type = GAMMA_CS_TFM_1D;
|
||||
else {
|
||||
/* Invalid lut size */
|
||||
dc_gamma_release(&gamma);
|
||||
return -EINVAL;
|
||||
gamma->num_entries = lut_size;
|
||||
if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
|
||||
gamma->type = GAMMA_RGB_256;
|
||||
else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
|
||||
gamma->type = GAMMA_CS_TFM_1D;
|
||||
else {
|
||||
/* Invalid lut size */
|
||||
dc_gamma_release(&gamma);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Convert drm_lut into dc_gamma */
|
||||
__drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
|
||||
}
|
||||
|
||||
/* Convert drm_lut into dc_gamma */
|
||||
__drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
|
||||
|
||||
/* Call color module to translate into something DC understands. Namely
|
||||
* a transfer function.
|
||||
/* predefined gamma ROM only exist for RAVEN and pre-RAVEN ASIC,
|
||||
* set canRomBeUsed accordingly
|
||||
*/
|
||||
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
|
||||
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
|
||||
gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
|
||||
dc_gamma_release(&gamma);
|
||||
gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
|
||||
|
||||
if (gamma)
|
||||
dc_gamma_release(&gamma);
|
||||
|
||||
if (!ret) {
|
||||
stream->out_transfer_func->type = old_type;
|
||||
DRM_ERROR("Out of memory when calculating regamma params\n");
|
||||
|
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"

/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum

@@ -688,8 +689,131 @@ static int vrr_range_show(struct seq_file *m, void *data)

return 0;
}

/* function description
*
* generic SDP message access for testing
*
* debugfs sdp_message is located at /syskernel/debug/dri/0/DP-x
*
* SDP header
* Hb0 : Secondary-Data Packet ID
* Hb1 : Secondary-Data Packet type
* Hb2 : Secondary-Data-packet-specific header, Byte 0
* Hb3 : Secondary-Data-packet-specific header, Byte 1
*
* for using custom sdp message: input 4 bytes SDP header and 32 bytes raw data
*/
static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
int r;
uint8_t data[36];
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dm_crtc_state *acrtc_state;
uint32_t write_size = 36;

if (connector->base.status != connector_status_connected)
return -ENODEV;

if (size == 0)
return 0;

acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);

r = copy_from_user(data, buf, write_size);

write_size -= r;

dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);

return write_size;
}
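
Illustrative userspace sketch (not part of the patch): one way to exercise the sdp_message node described above. The path and the header/type values are assumptions for illustration; the connector directory name under debugfs varies per board.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int send_test_sdp(void)
{
	uint8_t msg[36] = {0};
	int fd, ret;

	/* 4-byte SDP header: Hb0..Hb3 (example values only) */
	msg[0] = 0x00;		/* Secondary-Data Packet ID */
	msg[1] = 0x07;		/* Secondary-Data Packet type */
	msg[2] = 0x00;		/* packet-specific header, byte 0 */
	msg[3] = 0x1f;		/* packet-specific header, byte 1 */
	memset(&msg[4], 0xaa, 32);	/* 32 bytes of raw payload */

	fd = open("/sys/kernel/debug/dri/0/DP-1/sdp_message", O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, msg, sizeof(msg));
	close(fd);
	return ret == (int)sizeof(msg) ? 0 : -1;
}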
DEFINE_SHOW_ATTRIBUTE(vrr_range);

static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
int r;
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;

if (size < sizeof(connector->debugfs_dpcd_address))
return 0;

r = copy_from_user(&connector->debugfs_dpcd_address,
buf, sizeof(connector->debugfs_dpcd_address));

return size - r;
}

static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
int r;
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;

if (size < sizeof(connector->debugfs_dpcd_size))
return 0;

r = copy_from_user(&connector->debugfs_dpcd_size,
buf, sizeof(connector->debugfs_dpcd_size));

if (connector->debugfs_dpcd_size > 256)
connector->debugfs_dpcd_size = 0;

return size - r;
}

static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
int r;
char *data;
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
uint32_t write_size = connector->debugfs_dpcd_size;

if (size < write_size)
return 0;

data = kzalloc(write_size, GFP_KERNEL);
if (!data)
return 0;

r = copy_from_user(data, buf, write_size);

dm_helpers_dp_write_dpcd(link->ctx, link,
connector->debugfs_dpcd_address, data, write_size - r);
kfree(data);
return write_size - r;
}

static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
int r;
char *data;
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
uint32_t read_size = connector->debugfs_dpcd_size;

if (size < read_size)
return 0;

data = kzalloc(read_size, GFP_KERNEL);
if (!data)
return 0;

dm_helpers_dp_read_dpcd(link->ctx, link,
connector->debugfs_dpcd_address, data, read_size);

r = copy_to_user(buf, data, read_size);

kfree(data);
return read_size - r;
}

static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,

@@ -710,6 +834,31 @@ static const struct file_operations dp_phy_test_pattern_fops = {
.llseek = default_llseek
};

static const struct file_operations sdp_message_fops = {
.owner = THIS_MODULE,
.write = dp_sdp_message_debugfs_write,
.llseek = default_llseek
};

static const struct file_operations dp_dpcd_address_debugfs_fops = {
.owner = THIS_MODULE,
.write = dp_dpcd_address_write,
.llseek = default_llseek
};

static const struct file_operations dp_dpcd_size_debugfs_fops = {
.owner = THIS_MODULE,
.write = dp_dpcd_size_write,
.llseek = default_llseek
};

static const struct file_operations dp_dpcd_data_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dpcd_data_read,
.write = dp_dpcd_data_write,
.llseek = default_llseek
};

static const struct {
char *name;
const struct file_operations *fops;

@@ -717,7 +866,11 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
{"vrr_range", &vrr_range_fops}
{"vrr_range", &vrr_range_fops},
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
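
Illustrative userspace sketch (not part of the patch) of driving the new aux_dpcd_* nodes. The debugfs directory path is an assumption; note that the address and size handlers above copy raw binary values (a uint32_t each), not ASCII strings, and sizes above 256 are reset to 0 by the driver.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int dump_dpcd(const char *dir, uint32_t address, uint32_t size)
{
	char path[256];
	uint8_t buf[256];
	ssize_t n;
	int fd, i;

	snprintf(path, sizeof(path), "%s/aux_dpcd_address", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, &address, sizeof(address));	/* binary uint32_t */
	close(fd);

	snprintf(path, sizeof(path), "%s/aux_dpcd_size", dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, &size, sizeof(size));		/* binary uint32_t, <= 256 */
	close(fd);

	snprintf(path, sizeof(path), "%s/aux_dpcd_data", dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf));
	close(fd);

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}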
int connector_debugfs_init(struct amdgpu_dm_connector *connector)

@@ -842,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_target_backlight_pwm", &target_backlight_read},
};

/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
*/
static int visual_confirm_set(void *data, u64 val)
{
struct amdgpu_device *adev = data;

adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;

return 0;
}

/*
* Reads the DC visual confirm debug option value into the given buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
*/
static int visual_confirm_get(void *data, u64 *val)
{
struct amdgpu_device *adev = data;

*val = adev->dm.dc->debug.visual_confirm;

return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
visual_confirm_set, "%llu\n");

int dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {

@@ -867,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
adev,
&dtn_log_fops);

return PTR_ERR_OR_ZERO(ent);
if (IS_ERR(ent))
return PTR_ERR(ent);

ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
adev, &visual_confirm_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);

return 0;
}

@@ -264,7 +264,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}

/*
* poll pending down reply before clear payload allocation table
* poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,

@@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
adev,
source,
crtc_id,
state,
IRQ_TYPE_VUPDATE,
__func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
.set = amdgpu_dm_set_vupdate_irq_state,
.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,

@@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

@@ -84,6 +84,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
{
ssize_t result = 0;
struct aux_payload payload;
enum aux_channel_operation_result operation_result;

if (WARN_ON(msg->size > 16))
return -E2BIG;

@@ -97,13 +98,27 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
payload.defer_delay = 0;

result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload);
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);

if (payload.write)
result = msg->size;

if (result < 0) /* DC doesn't know about kernel error codes */
result = -EIO;
if (result < 0)
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
result = -ETIMEDOUT;
break;
}

return result;
}
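
Caller-side sketch (an assumption for illustration, not part of the patch): the finer-grained error codes above let an AUX user treat transient conditions differently from hard failures. The helper name and retry policy here are hypothetical.

#include <drm/drm_dp_helper.h>

static ssize_t aux_xfer_with_retry(struct drm_dp_aux *aux,
				   struct drm_dp_aux_msg *msg)
{
	ssize_t ret = -EIO;
	int tries;

	for (tries = 0; tries < 3; tries++) {
		ret = aux->transfer(aux, msg);
		if (ret >= 0 || (ret != -EBUSY && ret != -ETIMEDOUT))
			break;	/* success, or a non-transient error */
	}
	return ret;
}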
@@ -33,6 +33,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "amdgpu_smu.h"

bool dm_pp_apply_display_requirements(

@@ -40,6 +41,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
int i;

if (adev->pm.dpm_enabled) {

@@ -105,6 +107,9 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
else
smu_display_configuration_change(smu,
&adev->pm.pm_display_cfg);

amdgpu_pm_compute_clocks(adev);
}

@@ -308,6 +313,12 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
return true;
}
} else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
}

@@ -324,6 +335,13 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
} else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
}

DRM_INFO("DM_PPLIB: Validation clocks:\n");

@@ -374,14 +392,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_latency pp_clks = { 0 };
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret;

if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
return false;
if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
dc_to_pp_clock_type(clk_type),
&pp_clks);
if (ret)
return false;
} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
if (smu_get_clock_by_type_with_latency(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clks))
return false;
}

if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
dc_to_pp_clock_type(clk_type),
&pp_clks))
return false;

pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

@@ -397,14 +422,20 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_voltage pp_clk_info = {0};
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret;

if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
return false;

if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
dc_to_pp_clock_type(clk_type),
&pp_clk_info))
return false;
if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
dc_to_pp_clock_type(clk_type),
&pp_clk_info);
if (ret)
return false;
} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
if (smu_get_clock_by_type_with_voltage(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clk_info))
return false;
}

pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

@@ -445,6 +476,10 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
else if (adev->smu.funcs &&
adev->smu.funcs->display_clock_voltage_request)
ret = smu_display_clock_voltage_request(&adev->smu,
&pp_clock_request);
if (ret)
return false;
return true;

@@ -462,6 +497,8 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
else if (adev->smu.funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;

@@ -472,27 +509,6 @@ bool dm_pp_get_static_clocks(
return true;
}

void pp_rv_set_display_requirement(struct pp_smu *pp,
struct pp_smu_display_requirement_rv *req)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_display_clock_request clock = {0};

if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return;

clock.clock_type = amd_pp_dcf_clock;
clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);

clock.clock_type = amd_pp_f_clock;
clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}

void pp_rv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{

@@ -508,9 +524,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
return;

for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
if (ranges->reader_wm_sets[i].wm_inst > 3)
wm_dce_clocks[i].wm_set_id = WM_SET_A;

@@ -543,7 +556,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}

pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
else if (adev->smu.funcs &&
adev->smu.funcs->set_watermarks_for_clock_ranges)
smu_set_watermarks_for_clock_ranges(&adev->smu,
&wm_with_clock_ranges);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)

@@ -553,10 +572,10 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
return;

pp_funcs->notify_smu_enable_pwe(pp_handle);
if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
pp_funcs->notify_smu_enable_pwe(pp_handle);
else if (adev->smu.funcs)
smu_notify_smu_enable_pwe(&adev->smu);
}

void pp_rv_set_active_display_count(struct pp_smu *pp, int count)

@@ -611,17 +630,16 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}

void dm_pp_get_funcs_rv(
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs_rv *funcs)
struct pp_smu_funcs *funcs)
{
funcs->pp_smu.dm = ctx;
funcs->set_display_requirement = pp_rv_set_display_requirement;
funcs->set_wm_ranges = pp_rv_set_wm_ranges;
funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
funcs->set_display_count = pp_rv_set_active_display_count;
funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
funcs->rv_funcs.pp_smu.dm = ctx;
funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}

@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}

unsigned int dc_fixpt_u4d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 4, 19);
}

unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 3, 19);

@@ -247,6 +247,53 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
}
}

enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
{
switch (sw_mode) {
/* for 4/8/16 high tiles */
case DC_SW_LINEAR:
return dm_4k_tile;
case DC_SW_4KB_S:
case DC_SW_4KB_S_X:
return dm_4k_tile;
case DC_SW_64KB_S:
case DC_SW_64KB_S_X:
case DC_SW_64KB_S_T:
return dm_64k_tile;
case DC_SW_VAR_S:
case DC_SW_VAR_S_X:
return dm_256k_tile;

/* For 64bpp 2 high tiles */
case DC_SW_4KB_D:
case DC_SW_4KB_D_X:
return dm_4k_tile;
case DC_SW_64KB_D:
case DC_SW_64KB_D_X:
case DC_SW_64KB_D_T:
return dm_64k_tile;
case DC_SW_VAR_D:
case DC_SW_VAR_D_X:
return dm_256k_tile;

case DC_SW_4KB_R:
case DC_SW_4KB_R_X:
return dm_4k_tile;
case DC_SW_64KB_R:
case DC_SW_64KB_R_X:
return dm_64k_tile;
case DC_SW_VAR_R:
case DC_SW_VAR_R_X:
return dm_256k_tile;

/* Unsupported swizzle modes for dcn */
case DC_SW_256B_S:
default:
ASSERT(0); /* Not supported */
return 0;
}
}

static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)

@@ -287,46 +334,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */
input->src.cur0_bpp = 32;

switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
/* for 4/8/16 high tiles */
case DC_SW_LINEAR:
input->src.macro_tile_size = dm_4k_tile;
break;
case DC_SW_4KB_S:
case DC_SW_4KB_S_X:
input->src.macro_tile_size = dm_4k_tile;
break;
case DC_SW_64KB_S:
case DC_SW_64KB_S_X:
case DC_SW_64KB_S_T:
input->src.macro_tile_size = dm_64k_tile;
break;
case DC_SW_VAR_S:
case DC_SW_VAR_S_X:
input->src.macro_tile_size = dm_256k_tile;
break;

/* For 64bpp 2 high tiles */
case DC_SW_4KB_D:
case DC_SW_4KB_D_X:
input->src.macro_tile_size = dm_4k_tile;
break;
case DC_SW_64KB_D:
case DC_SW_64KB_D_X:
case DC_SW_64KB_D_T:
input->src.macro_tile_size = dm_64k_tile;
break;
case DC_SW_VAR_D:
case DC_SW_VAR_D_X:
input->src.macro_tile_size = dm_256k_tile;
break;

/* Unsupported swizzle modes for dcn */
case DC_SW_256B_S:
default:
ASSERT(0); /* Not supported */
break;
}
input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);

switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:

@@ -466,7 +474,7 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;

@@ -536,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);

context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;

v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);

context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}

if (v->voltage_level < 3) {

@@ -571,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);

context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}

v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;

@@ -591,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);

context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif

@@ -693,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,

bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
/*
* we want a breakdown of the various stages of validation, which the
* perf_trace macro doesn't support
*/
BW_VAL_TRACE_SETUP();

const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;

@@ -703,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit;

PERFORMANCE_TRACE_START();

BW_VAL_TRACE_COUNT();

if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);

@@ -1000,13 +1018,16 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}

if (v->voltage_level != 5) {
BW_VAL_TRACE_END_VOLTAGE_LEVEL();

if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;

if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)

@@ -1027,58 +1048,60 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);

context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;

context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}

context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);

context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);

if (context->bw.dcn.clk.dispclk_khz <
if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
context->bw.dcn.clk.dispclk_khz =
context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}

context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}

BW_VAL_TRACE_END_WATERMARKS();

for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

@@ -1141,7 +1164,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(
&context->res_ctx, pool,

@@ -1169,13 +1192,17 @@ bool dcn_validate_bandwidth(

input_idx++;
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
} else if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
}

if (v->voltage_level == 0) {

dc->dml.soc.sr_enter_plus_exit_time_us =
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}

/*

@@ -1188,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end();

PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();

if (bw_limit_pass && v->voltage_level != 5)
return true;

@@ -1395,12 +1423,14 @@ void dcn_bw_update_from_pplib(struct dc *dc)

void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */

if (!pp->set_wm_ranges)
if (dc->res_pool->pp_smu)
pp = &dc->res_pool->pp_smu->rv_funcs;
if (!pp || !pp->set_wm_ranges)
return;

kernel_fpu_begin();
@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;

link->preferred_link_setting = store_settings;

/* Retrain with preferred link settings only relevant for
* DP signal type
*/
if (!dc_is_dp_signal(link->connector_signal))
return;

for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {

@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,

link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;

link->preferred_link_setting = store_settings;
/* Cannot retrain link if backend is off */
if (link_stream->dpms_off)
return;

if (link_stream)
decide_link_settings(link_stream, &store_settings);

@@ -573,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}

uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */

link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;

return link_bw_kbps;

}
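
Worked example (an illustration, not part of the patch, assuming LINK_RATE_REF_FREQ_IN_KHZ is 27000, i.e. the same 27 MHz reference as LINK_RATE_REF_FREQ_IN_MHZ defined elsewhere in the file): HBR2 is link_rate 0x14 (20), so a 4-lane HBR2 link yields 20 * 27000 * 8 * 4 = 17280000 kbps, or 17.28 Gbps of 8b/10b payload bandwidth.

static unsigned int example_hbr2_x4_kbps(void)
{
	/* link_rate * 27000 kHz * 8 bits/byte * 4 lanes */
	return 20 * 27000 * 8 * 4;	/* = 17280000 kbps */
}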
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
return &link->preferred_link_setting;
return &link->verified_link_cap;
}

static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);

@@ -621,6 +654,10 @@ static bool construct(struct dc *dc,
#endif

enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;

memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);

@@ -668,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;

dc->current_state = dc_create_state();

if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}

/* Create logger */

dc_ctx->dce_environment = init_params->dce_environment;

@@ -722,14 +752,22 @@ static bool construct(struct dc *dc,
goto fail;
}

dc->res_pool = dc_create_resource_pool(
dc,
init_params->num_virtual_links,
dc_version,
init_params->asic_id);
dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
if (!dc->res_pool)
goto fail;

/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/

dc->current_state = dc_create_state(dc);

if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}

dc_resource_state_construct(dc, dc->current_state);

if (!create_links(dc, init_params->num_virtual_links))

@@ -746,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
struct dc_state *dangling_context = dc_create_state();
struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;

if (dangling_context == NULL)

@@ -811,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

dc->config = init_params->flags;

dc->build_id = DC_BUILD_ID;

DC_LOG_DC("Display Core initialized\n");

@@ -969,7 +1005,7 @@ static bool context_changed(
return false;
}

bool dc_validate_seamless_boot_timing(struct dc *dc,
bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing)
{

@@ -1060,7 +1096,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);

dc->hwss.prepare_bandwidth(dc, context);
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->apply_seamless_boot_optimization)
dc->optimize_seamless_boot = true;
}

if (!dc->optimize_seamless_boot)
dc->hwss.prepare_bandwidth(dc, context);

/* re-program planes for existing stream, in case we need to
* free up plane resource for later use

@@ -1135,12 +1177,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c

dc_enable_stereo(dc, context, dc_streams, context->stream_count);

/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
if (!dc->optimize_seamless_boot)
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);

for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;

memset(&context->commit_hints, 0, sizeof(context->commit_hints));

dc_release_state(dc->current_state);

dc->current_state = context;

@@ -1177,7 +1222,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;

if (dc->optimized_required == false)
if (!dc->optimized_required || dc->optimize_seamless_boot)
return true;

post_surface_trace(dc);

@@ -1195,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}

struct dc_state *dc_create_state(void)
struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);

if (!context)
return NULL;
/* Each context must have their own instance of VBA and in order to
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
*/
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif

kref_init(&context->refcount);

return context;
}
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);

if (!new_ctx)
return NULL;

memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

if (cur_pipe->top_pipe)
cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

}

for (i = 0; i < new_ctx->stream_count; i++) {
dc_stream_retain(new_ctx->streams[i]);
for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
dc_plane_state_retain(
new_ctx->stream_status[i].plane_states[j]);
}

kref_init(&new_ctx->refcount);

return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);

@@ -1666,6 +1753,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;

if (stream_update->dpms_off) {
dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
dc->hwss.optimize_bandwidth(dc, dc->current_state);

@@ -1673,6 +1761,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}

if (stream_update->abm_level && pipe_ctx->stream_res.abm) {

@@ -1700,7 +1789,16 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;

if (update_type == UPDATE_TYPE_FULL) {
if (dc->optimize_seamless_boot && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth.
*/
dc->optimize_seamless_boot = false;
dc->optimized_required = true;
}

if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}

@@ -1800,7 +1898,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {

/* initialize scratch memory for building context */
context = dc_create_state();
context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;

@@ -2099,13 +2197,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
@@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.clk.dispclk_khz,
context->bw.dcn.clk.dppclk_khz,
context->bw.dcn.clk.dcfclk_khz,
context->bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw.dcn.clk.fclk_khz,
context->bw.dcn.clk.socclk_khz);
context->bw_ctx.bw.dcn.clk.dispclk_khz,
context->bw_ctx.bw.dcn.clk.dppclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw_ctx.bw.dcn.clk.fclk_khz,
context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.clk.dispclk_khz,
context->bw.dcn.clk.dppclk_khz,
context->bw.dcn.clk.dcfclk_khz,
context->bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw.dcn.clk.fclk_khz,
context->bw.dcn.clk.socclk_khz);
context->bw_ctx.bw.dcn.clk.dispclk_khz,
context->bw_ctx.bw.dcn.clk.dppclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw_ctx.bw.dcn.clk.fclk_khz,
context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
@@ -58,7 +58,6 @@
|
||||
******************************************************************************/
|
||||
|
||||
enum {
|
||||
LINK_RATE_REF_FREQ_IN_MHZ = 27,
|
||||
PEAK_FACTOR_X1000 = 1006,
|
||||
/*
|
||||
* Some receivers fail to train on first try and are good
|
||||
@@ -515,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
|
||||
}
|
||||
|
||||
|
||||
static void read_edp_current_link_settings_on_detect(struct dc_link *link)
|
||||
{
|
||||
union lane_count_set lane_count_set = { {0} };
|
||||
uint8_t link_bw_set;
|
||||
uint8_t link_rate_set;
|
||||
|
||||
// Read DPCD 00101h to find out the number of lanes currently set
|
||||
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
|
||||
&lane_count_set.raw, sizeof(lane_count_set));
|
||||
link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
|
||||
|
||||
// Read DPCD 00100h to find if standard link rates are set
|
||||
core_link_read_dpcd(link, DP_LINK_BW_SET,
|
||||
&link_bw_set, sizeof(link_bw_set));
|
||||
|
||||
if (link_bw_set == 0) {
|
||||
/* If standard link rates are not being used,
|
||||
* Read DPCD 00115h to find the link rate set used
|
||||
*/
|
||||
core_link_read_dpcd(link, DP_LINK_RATE_SET,
|
||||
&link_rate_set, sizeof(link_rate_set));
|
||||
|
||||
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
|
||||
link->cur_link_settings.link_rate =
|
||||
link->dpcd_caps.edp_supported_link_rates[link_rate_set];
|
||||
link->cur_link_settings.link_rate_set = link_rate_set;
|
||||
link->cur_link_settings.use_link_rate_set = true;
|
||||
}
|
||||
} else {
|
||||
link->cur_link_settings.link_rate = link_bw_set;
|
||||
link->cur_link_settings.use_link_rate_set = false;
|
||||
}
|
||||
}
|
||||
|
||||
static bool detect_dp(
|
||||
struct dc_link *link,
|
||||
struct display_sink_capability *sink_caps,
|
||||
@@ -640,7 +673,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
|
||||
bool same_dpcd = true;
|
||||
enum dc_connection_type new_connection_type = dc_connection_none;
|
||||
DC_LOGGER_INIT(link->ctx->logger);
|
||||
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
|
||||
|
||||
if (dc_is_virtual_signal(link->connector_signal))
|
||||
return false;
|
||||
|
||||
if (false == dc_link_detect_sink(link, &new_connection_type)) {
|
||||
@@ -648,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_EDP &&
|
||||
link->local_sink)
|
||||
return true;
|
||||
if (link->connector_signal == SIGNAL_TYPE_EDP) {
|
||||
/* On detect, we want to make sure current link settings are
|
||||
* up to date, especially if link was powered on by GOP.
|
||||
*/
|
||||
read_edp_current_link_settings_on_detect(link);
|
||||
if (link->local_sink)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_LVDS &&
|
||||
link->local_sink)
|
||||
@@ -720,9 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
|
||||
same_dpcd = false;
|
||||
}
|
||||
/* Active dongle plug in without display or downstream unplug*/
|
||||
if (link->type == dc_connection_active_dongle
|
||||
&& link->dpcd_caps.sink_count.
|
||||
bits.SINK_COUNT == 0) {
|
||||
if (link->type == dc_connection_active_dongle &&
|
||||
link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
|
||||
if (prev_sink != NULL) {
|
||||
/* Downstream unplug */
|
||||
dc_sink_release(prev_sink);
|
||||
@@ -1172,8 +1210,6 @@ static bool construct(
|
||||
goto create_fail;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* TODO: #DAL3 Implement id to str function.*/
|
||||
LINK_INFO("Connector[%d] description:"
|
||||
"signal %d\n",
|
||||
@@ -1207,7 +1243,7 @@ static bool construct(
|
||||
link->link_enc = link->dc->res_pool->funcs->link_enc_create(
|
||||
&enc_init_data);
|
||||
|
||||
if( link->link_enc == NULL) {
|
||||
if (link->link_enc == NULL) {
|
||||
DC_ERROR("Failed to create link encoder!\n");
|
||||
goto link_enc_create_fail;
|
||||
}
|
||||
@@ -1399,9 +1435,24 @@ static enum dc_status enable_link_dp(
|
||||
/* get link settings for video mode timing */
|
||||
decide_link_settings(stream, &link_settings);
|
||||
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
|
||||
/* If link settings are different than current and link already enabled
|
||||
* then need to disable before programming to new rate.
|
||||
*/
|
||||
if (link->link_status.link_active &&
|
||||
(link->cur_link_settings.lane_count != link_settings.lane_count ||
|
||||
link->cur_link_settings.link_rate != link_settings.link_rate)) {
|
||||
dp_disable_link_phy(link, pipe_ctx->stream->signal);
|
||||
}
|
||||
|
||||
/*in case it is not on*/
|
||||
link->dc->hwss.edp_power_control(link, true);
|
||||
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
|
||||
}
|
||||
|
||||
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
|
||||
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
|
||||
state->dccg->funcs->update_clocks(state->dccg, state, false);
|
||||
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
|
||||
|
||||
dp_enable_link_phy(
|
||||
link,
|
||||
@@ -1442,15 +1493,9 @@ static enum dc_status enable_link_edp(
|
||||
struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
enum dc_status status;
|
||||
struct dc_stream_state *stream = pipe_ctx->stream;
|
||||
struct dc_link *link = stream->link;
|
||||
/*in case it is not on*/
|
||||
link->dc->hwss.edp_power_control(link, true);
|
||||
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
|
||||
|
||||
status = enable_link_dp(state, pipe_ctx);
|
||||
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
@@ -1466,14 +1511,14 @@ static enum dc_status enable_link_dp_mst(
|
||||
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
|
||||
return DC_OK;
|
||||
|
||||
/* to make sure the pending down rep can be processed
|
||||
* before clear payload table
|
||||
*/
|
||||
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
|
||||
|
||||
/* clear payload table */
|
||||
dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
|
||||
|
||||
/* to make sure the pending down rep can be processed
|
||||
* before enabling the link
|
||||
*/
|
||||
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
|
||||
|
||||
/* set the sink to MST mode before enabling the link */
|
||||
dp_enable_mst_on_sink(link, true);
|
||||
|
||||
@@ -1982,7 +2027,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
|
||||
pipe_ctx->stream->signal,
|
||||
stream->phy_pix_clk);
|
||||
|
||||
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
|
||||
if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
|
||||
dal_ddc_service_read_scdc_data(link->ddc);
|
||||
}
|
||||
|
||||
@@ -2074,11 +2119,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
|
||||
{
|
||||
|
||||
uint32_t pxl_clk = timing->pix_clk_100hz;
|
||||
|
||||
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
|
||||
pxl_clk /= 2;
|
||||
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
|
||||
pxl_clk = pxl_clk * 2 / 3;
|
||||
|
||||
if (timing->display_color_depth == COLOR_DEPTH_101010)
|
||||
pxl_clk = pxl_clk * 10 / 8;
|
||||
else if (timing->display_color_depth == COLOR_DEPTH_121212)
|
||||
pxl_clk = pxl_clk * 12 / 8;
|
||||
|
||||
return pxl_clk;
|
||||
}
|
||||
|
||||
static bool dp_active_dongle_validate_timing(
|
||||
const struct dc_crtc_timing *timing,
|
||||
const struct dpcd_caps *dpcd_caps)
|
||||
{
|
||||
unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
|
||||
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
|
||||
|
||||
switch (dpcd_caps->dongle_type) {
|
||||
@@ -2115,13 +2177,6 @@ static bool dp_active_dongle_validate_timing(
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/* Check Color Depth and Pixel Clock */
|
||||
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
|
||||
required_pix_clk_100hz /= 2;
|
||||
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
|
||||
required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
|
||||
|
||||
switch (timing->display_color_depth) {
|
||||
case COLOR_DEPTH_666:
|
||||
case COLOR_DEPTH_888:
|
||||
@@ -2130,14 +2185,11 @@ static bool dp_active_dongle_validate_timing(
|
||||
case COLOR_DEPTH_101010:
|
||||
if (dongle_caps->dp_hdmi_max_bpc < 10)
|
||||
return false;
|
||||
required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
|
||||
break;
|
||||
case COLOR_DEPTH_121212:
|
||||
if (dongle_caps->dp_hdmi_max_bpc < 12)
|
||||
return false;
|
||||
required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
|
||||
break;
|
||||
|
||||
case COLOR_DEPTH_141414:
|
||||
case COLOR_DEPTH_161616:
|
||||
default:
|
||||
@@ -2145,7 +2197,7 @@ static bool dp_active_dongle_validate_timing(
|
||||
return false;
|
||||
}
|
||||
|
||||
if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
|
||||
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
@@ -2166,7 +2218,7 @@ enum dc_status dc_link_validate_mode_timing(
|
||||
return DC_OK;
|
||||
|
||||
/* Passive Dongle */
|
||||
if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
|
||||
if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
|
||||
return DC_EXCEED_DONGLE_CAP;
|
||||
|
||||
/* Active Dongle*/
|
||||
@@ -2284,14 +2336,13 @@ void core_link_resume(struct dc_link *link)
|
||||
|
||||
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
|
||||
{
|
||||
struct dc_link_settings *link_settings =
|
||||
&stream->link->cur_link_settings;
|
||||
uint32_t link_rate_in_mbps =
|
||||
link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
|
||||
struct fixed31_32 mbps = dc_fixpt_from_int(
|
||||
link_rate_in_mbps * link_settings->lane_count);
|
||||
struct fixed31_32 mbytes_per_sec;
|
||||
uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
|
||||
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
|
||||
|
||||
return dc_fixpt_div_int(mbps, 54);
|
||||
mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
|
||||
|
||||
return dc_fixpt_div_int(mbytes_per_sec, 54);
|
||||
}
|
||||
|
||||
static int get_color_depth(enum dc_color_depth color_depth)
|
||||
@@ -2316,7 +2367,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
|
||||
uint32_t denominator;
|
||||
|
||||
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
|
||||
kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
|
||||
kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
|
||||
|
||||
/*
|
||||
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
|
||||
@@ -2551,12 +2602,12 @@ void core_link_enable_stream(
|
||||
struct dc_state *state,
|
||||
struct pipe_ctx *pipe_ctx)
|
||||
{
|
||||
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
|
||||
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
|
||||
struct dc_stream_state *stream = pipe_ctx->stream;
|
||||
enum dc_status status;
|
||||
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
|
||||
|
||||
if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
|
||||
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
|
||||
stream->link->link_enc->funcs->setup(
|
||||
stream->link->link_enc,
|
||||
pipe_ctx->stream->signal);
|
||||
@@ -2570,9 +2621,10 @@ void core_link_enable_stream(
|
||||
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
|
||||
pipe_ctx->stream_res.stream_enc,
|
||||
&stream->timing,
|
||||
stream->output_color_space);
|
||||
stream->output_color_space,
|
||||
stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
|
||||
|
||||
if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
|
||||
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
|
||||
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
|
||||
pipe_ctx->stream_res.stream_enc,
|
||||
&stream->timing,
|
||||
@@ -2736,3 +2788,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t dc_bandwidth_in_kbps_from_timing(
|
||||
const struct dc_crtc_timing *timing)
|
||||
{
|
||||
uint32_t bits_per_channel = 0;
|
||||
uint32_t kbps;
|
||||
|
||||
switch (timing->display_color_depth) {
|
||||
case COLOR_DEPTH_666:
|
||||
bits_per_channel = 6;
|
||||
break;
|
||||
case COLOR_DEPTH_888:
|
||||
bits_per_channel = 8;
|
||||
break;
|
||||
case COLOR_DEPTH_101010:
|
||||
bits_per_channel = 10;
|
||||
break;
|
||||
case COLOR_DEPTH_121212:
|
||||
bits_per_channel = 12;
|
||||
break;
|
||||
case COLOR_DEPTH_141414:
|
||||
bits_per_channel = 14;
|
||||
break;
|
||||
case COLOR_DEPTH_161616:
|
||||
bits_per_channel = 16;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
ASSERT(bits_per_channel != 0);
|
||||
|
||||
kbps = timing->pix_clk_100hz / 10;
|
||||
kbps *= bits_per_channel;
|
||||
|
||||
if (timing->flags.Y_ONLY != 1) {
|
||||
/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
|
||||
kbps *= 3;
|
||||
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
|
||||
kbps /= 2;
|
||||
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
|
||||
kbps = kbps * 2 / 3;
|
||||
}
|
||||
|
||||
return kbps;
|
||||
|
||||
}
|
||||
|
@@ -573,12 +573,28 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
int dc_link_aux_transfer(struct ddc_service *ddc,
struct aux_payload *payload)
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
* in the payload->reply and the result through
* *operation_result. Returns the number of bytes transferred,
* or -1 on a failure.
*/
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result)
{
return dce_aux_transfer(ddc, payload);
return dce_aux_transfer_raw(ddc, payload, operation_result);
}
/* dc_link_aux_transfer_with_retries() - Attempt to submit an
* aux payload, retrying on timeouts, defers, and busy states
* as outlined in the DP spec. Returns true if the request
* was successful.
*
* Unless you want to implement your own retry semantics, this
* is probably the one you want.
*/
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
@@ -93,12 +93,10 @@ static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
uint8_t rate = (uint8_t)
(lt_settings->link_settings.link_rate);
uint8_t rate;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -111,29 +109,42 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
link_set_buffer[0] = rate;
link_set_buffer[1] = lane_count_set.raw;
core_link_write_dpcd(link, DP_LINK_BW_SET,
link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
(link->dpcd_caps.link_rate_set >= 1 &&
link->dpcd_caps.link_rate_set <= 8)) {
lt_settings->link_settings.use_link_rate_set == true) {
rate = 0;
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
core_link_write_dpcd(link, DP_LINK_RATE_SET,
&link->dpcd_caps.link_rate_set, 1);
&lt_settings->link_settings.link_rate_set, 1);
} else {
rate = (uint8_t) (lt_settings->link_settings.link_rate);
core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
__func__,
DP_LINK_BW_SET,
lt_settings->link_settings.link_rate,
DP_LANE_COUNT_SET,
lt_settings->link_settings.lane_count,
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
if (rate) {
DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
__func__,
DP_LINK_BW_SET,
lt_settings->link_settings.link_rate,
DP_LANE_COUNT_SET,
lt_settings->link_settings.lane_count,
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
} else {
DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
__func__,
DP_LINK_RATE_SET,
lt_settings->link_settings.link_rate_set,
DP_LANE_COUNT_SET,
lt_settings->link_settings.lane_count,
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
}
}
@@ -952,6 +963,8 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.link_settings.link_rate = link_setting->link_rate;
lt_settings.link_settings.lane_count = link_setting->lane_count;
lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -1075,7 +1088,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
LINK_SPREAD_05_DOWNSPREAD_30KHZ};
LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
@@ -1520,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
static uint32_t bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
case COLOR_DEPTH_888:
bits_per_channel = 8;
break;
case COLOR_DEPTH_101010:
bits_per_channel = 10;
break;
case COLOR_DEPTH_121212:
bits_per_channel = 12;
break;
case COLOR_DEPTH_141414:
bits_per_channel = 14;
break;
case COLOR_DEPTH_161616:
bits_per_channel = 16;
break;
default:
break;
}
ASSERT(bits_per_channel != 0);
kbps = timing->pix_clk_100hz / 10;
kbps *= bits_per_channel;
if (timing->flags.Y_ONLY != 1) {
/*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
kbps *= 3;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
kbps /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
kbps = kbps * 2 / 3;
}
return kbps;
}
static uint32_t bandwidth_in_kbps_from_link_settings(
const struct dc_link_settings *link_setting)
{
uint32_t link_rate_in_kbps = link_setting->link_rate *
LINK_RATE_REF_FREQ_IN_KHZ;
uint32_t lane_count = link_setting->lane_count;
uint32_t kbps = link_rate_in_kbps;
kbps *= lane_count;
kbps *= 8; /* 8 bits per byte*/
return kbps;
}
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -1598,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
/* We always use verified link settings */
link_setting = &link->verified_link_cap;
link_setting = dc_link_get_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -1607,8 +1556,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
req_bw = bandwidth_in_kbps_from_timing(timing);
max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
req_bw = dc_bandwidth_in_kbps_from_timing(timing);
max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@@ -1629,19 +1578,106 @@ bool dp_validate_mode_timing(
return false;
}
static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting = {
LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
struct dc_link_settings current_link_setting =
initial_link_setting;
uint32_t link_bw;
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
link_bw = dc_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
return true;
}
if (current_link_setting.lane_count <
link->verified_link_cap.lane_count) {
current_link_setting.lane_count =
increase_lane_count(
current_link_setting.lane_count);
} else {
current_link_setting.link_rate =
increase_link_rate(
current_link_setting.link_rate);
current_link_setting.lane_count =
initial_link_setting.lane_count;
}
}
return false;
}
static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
uint32_t link_bw;
if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
link->dpcd_caps.edp_supported_link_rates_count == 0 ||
link->dc->config.optimize_edp_link_rate == false) {
*link_setting = link->verified_link_cap;
return true;
}
memset(&initial_link_setting, 0, sizeof(initial_link_setting));
initial_link_setting.lane_count = LANE_COUNT_ONE;
initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
initial_link_setting.use_link_rate_set = true;
initial_link_setting.link_rate_set = 0;
current_link_setting = initial_link_setting;
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
link_bw = dc_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
return true;
}
if (current_link_setting.lane_count <
link->verified_link_cap.lane_count) {
current_link_setting.lane_count =
increase_lane_count(
current_link_setting.lane_count);
} else {
if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
current_link_setting.link_rate_set++;
current_link_setting.link_rate =
link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
current_link_setting.lane_count =
initial_link_setting.lane_count;
} else
break;
}
}
return false;
}
void decide_link_settings(struct dc_stream_state *stream,
struct dc_link_settings *link_setting)
{
struct dc_link_settings initial_link_setting = {
LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
struct dc_link_settings current_link_setting =
initial_link_setting;
struct dc_link *link;
uint32_t req_bw;
uint32_t link_bw;
req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
link = stream->link;
@@ -1664,38 +1700,11 @@ void decide_link_settings(struct dc_stream_state *stream,
return;
}
/* EDP use the link cap setting */
if (link->connector_signal == SIGNAL_TYPE_EDP) {
*link_setting = link->verified_link_cap;
return;
}
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
link_bw = bandwidth_in_kbps_from_link_settings(
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
if (decide_edp_link_settings(link, link_setting, req_bw))
return;
}
if (current_link_setting.lane_count <
link->verified_link_cap.lane_count) {
current_link_setting.lane_count =
increase_lane_count(
current_link_setting.lane_count);
} else {
current_link_setting.link_rate =
increase_link_rate(
current_link_setting.link_rate);
current_link_setting.lane_count =
initial_link_setting.lane_count;
}
}
} else if (decide_dp_link_settings(link, link_setting, req_bw))
return;
BREAK_TO_DEBUGGER();
ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
@@ -2155,11 +2164,7 @@ bool is_mst_supported(struct dc_link *link)
bool is_dp_active_dongle(const struct dc_link *link)
{
enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
(dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
return link->dpcd_caps.is_branch_dev;
}
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
@@ -2180,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
static void read_dp_device_vendor_id(struct dc_link *link)
{
struct dp_device_vendor_id dp_id;
/* read IEEE branch device id */
core_link_read_dpcd(
link,
DP_BRANCH_OUI,
(uint8_t *)&dp_id,
sizeof(dp_id));
link->dpcd_caps.branch_dev_id =
(dp_id.ieee_oui[0] << 16) +
(dp_id.ieee_oui[1] << 8) +
dp_id.ieee_oui[2];
memmove(
link->dpcd_caps.branch_dev_name,
dp_id.ieee_device_id,
sizeof(dp_id.ieee_device_id));
}
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -2193,6 +2222,9 @@ static void get_active_converter_info(
return;
}
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2234,8 +2266,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
det_caps[1] * 25000;
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
det_caps[1] * 2500;
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@@ -2252,7 +2284,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -2262,27 +2294,6 @@ static void get_active_converter_info(
ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
struct dp_device_vendor_id dp_id;
/* read IEEE branch device id */
core_link_read_dpcd(
link,
DP_BRANCH_OUI,
(uint8_t *)&dp_id,
sizeof(dp_id));
link->dpcd_caps.branch_dev_id =
(dp_id.ieee_oui[0] << 16) +
(dp_id.ieee_oui[1] << 8) +
dp_id.ieee_oui[2];
memmove(
link->dpcd_caps.branch_dev_name,
dp_id.ieee_device_id,
sizeof(dp_id.ieee_device_id));
}
{
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
@@ -2347,6 +2358,10 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
uint8_t dpcd_dprx_data = '\0';
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
@@ -2383,7 +2398,10 @@ static bool retrieve_link_cap(struct dc_link *link)
aux_rd_interval.raw =
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
link->dpcd_caps.ext_receiver_cap_field_present =
aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
memset(ext_cap_data, '\0', sizeof(ext_cap_data));
@@ -2404,11 +2422,44 @@ static bool retrieve_link_cap(struct dc_link *link)
}
link->dpcd_caps.dpcd_rev.raw =
dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
DP_DPRX_FEATURE_ENUMERATION_LIST,
&dpcd_dprx_data,
sizeof(dpcd_dprx_data));
if (status == DC_OK)
break;
}
link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
if (status != DC_OK)
dm_error("%s: Read DPRX caps data failed.\n", __func__);
}
else {
link->dpcd_caps.dprx_feature.raw = 0;
}
/* Error condition checking...
* It is impossible for Sink to report Max Lane Count = 0.
* It is possible for Sink to report Max Link Rate = 0, if it is
* an eDP device that is reporting specialized link rates in the
* SUPPORTED_LINK_RATE table.
*/
if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
return false;
ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
DP_DPCD_REV];
read_dp_device_vendor_id(link);
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2536,31 +2587,31 @@ enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
void detect_edp_sink_caps(struct dc_link *link)
{
uint8_t supported_link_rates[16] = {0};
uint8_t supported_link_rates[16];
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
memset(supported_link_rates, 0, sizeof(supported_link_rates));
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
link->dc->config.optimize_edp_link_rate) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
link->dpcd_caps.link_rate_set = 0;
for (entry = 0; entry < 16; entry += 2) {
// DPCD register reports per-lane link rate = 16-bit link rate capability
// value X 200 kHz. Need multipler to find link rate in kHz.
// value X 200 kHz. Need multiplier to find link rate in kHz.
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
if (link->reported_link_cap.link_rate < link_rate) {
link->reported_link_cap.link_rate = link_rate;
link->dpcd_caps.link_rate_set = entry;
}
link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
link->dpcd_caps.edp_supported_link_rates_count++;
}
}
}
@@ -2601,6 +2652,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
memset(&params, 0, sizeof(params));
@@ -2640,8 +2692,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
@@ -2650,11 +2701,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
resource_build_bit_depth_reduction_params(pipe_ctx->stream,
&params);
resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
pipe_ctx->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,