Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next
Backmerge Linus master to get the connector locking revert.

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux: (645 commits)
  sysctl: fix proc_doulongvec_ms_jiffies_minmax()
  Revert "drm/probe-helpers: Drop locking from poll_enable"
  MAINTAINERS: add Dan Streetman to zbud maintainers
  MAINTAINERS: add Dan Streetman to zswap maintainers
  mm: do not export ioremap_page_range symbol for external module
  mn10300: fix build error of missing fpu_save()
  romfs: use different way to generate fsid for BLOCK or MTD
  frv: add missing atomic64 operations
  mm, page_alloc: fix premature OOM when racing with cpuset mems update
  mm, page_alloc: move cpuset seqcount checking to slowpath
  mm, page_alloc: fix fast-path race with cpuset update or removal
  mm, page_alloc: fix check for NULL preferred_zone
  kernel/panic.c: add missing \n
  fbdev: color map copying bounds checking
  frv: add atomic64_add_unless()
  mm/mempolicy.c: do not put mempolicy before using its nodemask
  radix-tree: fix private list warnings
  Documentation/filesystems/proc.txt: add VmPin
  mm, memcg: do not retry precharge charges
  proc: add a schedule point in proc_pid_readdir()
  ...
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;

@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
"STONEY",
"POLARIS10",
"POLARIS11",
"POLARIS12",
"LAST",
};

@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)

@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
/* Polaris12 */
{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},

{0, 0, 0}
};

@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:

@@ -65,6 +65,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"

/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
case CHIP_POLARIS12:
fw_name = FIRMWARE_POLARIS12;
break;
default:
return -EINVAL;
}

@@ -52,6 +52,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
case CHIP_POLARIS12:
fw_name = FIRMWARE_POLARIS12;
break;

default:
return -EINVAL;

@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v10_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v10_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v10_0_show_cursor(crtc);

dce_v10_0_lock_cursor(crtc, false);

@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 6;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
num_crtc = 5;
break;
default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.audio.num_pins = 6;
break;
default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
int pll;

if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v11_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v11_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v11_0_show_cursor(crtc);

dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;

if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11)) {
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,

@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;

int w = amdgpu_crtc->cursor_width;

amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;

@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v6_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v6_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}

@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v8_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v8_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v8_0_show_cursor(crtc);

dce_v8_0_lock_cursor(crtc, false);

@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_STONEY:
chip_name = "stoney";
break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)

break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
cz_enable_cp_power_gating(adev, true);
else
cz_enable_cp_power_gating(adev, false);
} else if (adev->asic_type == CHIP_POLARIS11) {
} else if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
adev->asic_type == CHIP_POLARIS10) {
adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x00000000);
break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
(adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS10)) {
(adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS12)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
mqd->cp_hqd_persistent_state = tmp;
if (adev->asic_type == CHIP_STONEY ||
adev->asic_type == CHIP_POLARIS11 ||
adev->asic_type == CHIP_POLARIS10) {
adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
if (adev->asic_type == CHIP_POLARIS11)
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else

@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
const char *chip_name;
char fw_name[30];
int err;
bool is_58_fw = false;

DRM_DEBUG("\n");

@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
/* this memory configuration requires special firmware */
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
is_58_fw = true;

if (is_58_fw)
snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
gmc_v6_0_set_fault_enable_default(adev, true);

gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
else
return 0;
}

static int gmc_v6_0_sw_init(void *handle)

@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:

@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;

@@ -56,7 +56,6 @@
#define BIOS_SCRATCH_4 0x5cd

MODULE_FIRMWARE("radeon/tahiti_smc.bin");
MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
} else if (adev->asic_type == CHIP_VERDE) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6820) ||
(adev->pdev->device == 0x6821) ||
(adev->pdev->device == 0x6822) ||
(adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682A) ||
(adev->pdev->device == 0x682B)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
@@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
max_mclk = 80000;
}
}
/* Apply dpm quirks */
@@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x6810) ||
(adev->pdev->device == 0x6811) ||
(adev->pdev->device == 0x6816) ||
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
if ((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6810) ||
(adev->pdev->device == 0x6811)))
chip_name = "pitcairn_k";
else
chip_name = "pitcairn";
break;
case CHIP_VERDE:
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6820) ||
(adev->pdev->device == 0x6821) ||
(adev->pdev->device == 0x6822) ||
(adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682A) ||
(adev->pdev->device == 0x682B))
if (((adev->pdev->device == 0x6820) &&
((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83))) ||
((adev->pdev->device == 0x6821) &&
((adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87))) ||
((adev->pdev->revision == 0x87) &&
((adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682b))))
chip_name = "verde_k";
else
chip_name = "verde";
break;
case CHIP_OLAND:
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605))
if (((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6600) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605) ||
(adev->pdev->device == 0x6610))) ||
((adev->pdev->revision == 0x83) &&
(adev->pdev->device == 0x6610)))
chip_name = "oland_k";
else
chip_name = "oland";
break;
case CHIP_HAINAN:
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0xC3) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667))
if (((adev->pdev->revision == 0x81) &&
(adev->pdev->device == 0x6660)) ||
((adev->pdev->revision == 0x83) &&
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665))
chip_name = "banks_k_2";
else
chip_name = "hainan";
break;

@@ -40,13 +40,14 @@
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)

return r;
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
* uvd_v4_2_hw_init - start and test UVD block
*
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;

uvd_v4_2_init_cg(adev);
uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;

/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;

WREG32(mmUVD_CGC_GATE, 0);
uvd_v4_2_set_dcm(adev, true);

uvd_v4_2_mc_resume(adev);

/* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)

/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

uvd_v4_2_set_dcm(adev, false);
}

/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
{
bool hw_mode = true;

if (hw_mode) {
uvd_v4_2_set_dcm(adev, false);
} else {
u32 tmp = RREG32(mmUVD_CGC_CTRL);
tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
WREG32(mmUVD_CGC_CTRL, tmp);
}
}

static bool uvd_v4_2_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (state == AMD_CG_STATE_GATE)
gate = true;

uvd_v4_2_enable_mgcg(adev, gate);

return 0;
}

@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;

if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
return 0;

@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (curstate == state)
return 0;

curstate = state;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))

@@ -43,9 +43,13 @@

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02

#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@

#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))

#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
| GRBM_GFX_INDEX__VCE_ALL_PIPE)

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;
@@ -320,11 +327,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;

/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11))
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
return AMDGPU_VCE_HARVEST_VCE1;

/* Tonga and CZ are dual or single pipe */
@@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);

if (srbm_soft_reset) {
@@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;

@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");

/*
* Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
default:
break;
}
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
case CHIP_TONGA:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
case CHIP_POLARIS12:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x64;
break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_TONGA:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
amdgpu_ip_block_add(adev, &vi_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);

@@ -23,7 +23,7 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__

#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */

/*
* Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
CHIP_STONEY,
CHIP_POLARIS10,
CHIP_POLARIS11,
CHIP_POLARIS12,
CHIP_LAST,
};

@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;

@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
/* EPR# 419220 - HW limitation */
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));

/* Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
}
}
return 0;
}

@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);

if (hwmgr->chip_id == CHIP_POLARIS11)
if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
return 0;

@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
} else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);

@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
polaris10_smum_init(smumgr);
break;
default:

@@ -1385,6 +1385,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,

pm_runtime_enable(dev);

pm_runtime_get_sync(dev);
phy_power_on(dp->phy);

analogix_dp_init_dp(dp);
@@ -1417,9 +1418,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}

phy_power_off(dp->phy);
pm_runtime_put(dev);

return 0;

err_disable_pm_runtime:

phy_power_off(dp->phy);
pm_runtime_put(dev);
pm_runtime_disable(dev);

return ret;

@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
the modesetting userspace X.org driver.

Cirrus is obsolete, the hardware was designed in the '90s
and can't keep up with today's needs. More background:
https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/

Better alternatives are:
- stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
- qxl (DRM_QXL, qemu -vga qxl, works best with spice)
- virtio (DRM_VIRTIO_GPU, qemu -vga virtio)

@@ -1227,8 +1227,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,

if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
if (ret) {
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
}

/*

@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;

mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}

@@ -146,8 +146,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
drm_connector_list_iter_put(&conn_iter);

if (dev->mode_config.delayed_event) {
/*
* FIXME:
*
* Use short (1s) delay to handle the initial delayed event.
* This delay should not be needed, but Optimus/nouveau will
* fail in a mysterious way if the delayed event is handled as
* soon as possible like it is done in
* drm_helper_probe_single_connector_modes() in case the poll
* was enabled before.
*/
poll = true;
delay = 0;
delay = HZ;
}

if (poll)

@@ -117,9 +117,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;

/*
* XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
* drm_mm into giving out a low IOVA after address space
* rollover. This needs a proper fix.
*/
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
DRM_MM_SEARCH_DEFAULT);
mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

if (ret != -ENOSPC)
break;

@@ -46,7 +46,8 @@ enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
BIT_WIN_UPDATED,
BIT_SUSPENDED
BIT_SUSPENDED,
BIT_REQUEST_UPDATE
};

struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_vsync_end = m->crtc_vsync_start + 1;
}

decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);

/* enable clock gate */
val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
writel(val, ctx->addr + DECON_CMU);

if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
decon_setup_trigger(ctx);

@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,

/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}

static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
return;

decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}

static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);

/* standalone update */
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);

if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);

@@ -37,13 +37,6 @@
#include "i915_drv.h"
#include "gvt.h"

#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -168,6 +161,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}

static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
{
int i;

for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, 0);
}

static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -181,9 +182,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);

mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -211,13 +212,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;

_clear_vgpu_fence(vgpu);

mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -316,6 +318,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
free_resource(vgpu);
}

/**
* intel_vgpu_reset_resource - reset resource state owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to reset resource state owned by a vGPU.
*
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
intel_runtime_pm_put(dev_priv);
}

/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU

@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
|
||||
*
|
||||
* @vgpu: a vGPU
|
||||
* @primary: is the vGPU presented as primary
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
|
||||
bool primary)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
const struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
u16 *gmch_ctl;
|
||||
int i;
|
||||
|
||||
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
|
||||
info->cfg_space_size);
|
||||
|
||||
if (!primary) {
|
||||
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
|
||||
INTEL_GVT_PCI_CLASS_VGA_OTHER;
|
||||
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
|
||||
INTEL_GVT_PCI_CLASS_VGA_OTHER;
|
||||
}
|
||||
|
||||
/* Show guest that there isn't any stolen memory.*/
|
||||
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
|
||||
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
|
||||
|
||||
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
|
||||
gvt_aperture_pa_base(gvt), true);
|
||||
|
||||
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
|
||||
| PCI_COMMAND_MEMORY
|
||||
| PCI_COMMAND_MASTER);
|
||||
/*
|
||||
* Clear the bar upper 32bit and let guest to assign the new value
|
||||
*/
|
||||
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
|
||||
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
|
||||
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
|
||||
|
||||
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
|
||||
vgpu->cfg_space.bar[i].size = pci_resource_len(
|
||||
gvt->dev_priv->drm.pdev, i * 2);
|
||||
vgpu->cfg_space.bar[i].tracked = false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reset_cfg_space - reset vGPU configuration space
|
||||
*
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
|
||||
{
|
||||
u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
|
||||
bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
|
||||
INTEL_GVT_PCI_CLASS_VGA_OTHER;
|
||||
|
||||
if (cmd & PCI_COMMAND_MEMORY) {
|
||||
trap_gttmmio(vgpu, false);
|
||||
map_aperture(vgpu, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Currently we only do such reset when vGPU is not
|
||||
* owned by any VM, so we simply restore entire cfg
|
||||
* space to default value.
|
||||
*/
|
||||
intel_vgpu_init_cfg_space(vgpu, primary);
|
||||
}
|
||||
|
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
|
||||
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
|
||||
{
|
||||
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
|
||||
u64 pte;
|
||||
|
||||
#ifdef readq
|
||||
pte = readq(addr);
|
||||
#else
|
||||
pte = ioread32(addr);
|
||||
pte |= (u64)ioread32(addr + 4) << 32;
|
||||
#endif
|
||||
return pte;
|
||||
return readq(addr);
|
||||
}
|
||||
|
||||
static void write_pte64(struct drm_i915_private *dev_priv,
|
||||
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
|
||||
|
||||
#ifdef writeq
|
||||
writeq(pte, addr);
|
||||
#else
|
||||
iowrite32((u32)pte, addr);
|
||||
iowrite32(pte >> 32, addr + 4);
|
||||
#endif
|
||||
|
||||
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
|
||||
POSTING_READ(GFX_FLSH_CNTL_GEN6);
|
||||
}
|
||||
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
|
||||
info->gtt_entry_size;
|
||||
mem = kzalloc(mm->has_shadow_page_table ?
|
||||
mm->page_table_entry_size * 2
|
||||
: mm->page_table_entry_size,
|
||||
GFP_ATOMIC);
|
||||
: mm->page_table_entry_size, GFP_KERNEL);
|
||||
if (!mem)
|
||||
return -ENOMEM;
|
||||
mm->virtual_page_table = mem;
|
||||
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_mm *mm;
|
||||
int ret;
|
||||
|
||||
mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
|
||||
mm = kzalloc(sizeof(*mm), GFP_KERNEL);
|
||||
if (!mm) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||
int page_entry_num = GTT_PAGE_SIZE >>
|
||||
vgpu->gvt->device_info.gtt_entry_size_shift;
|
||||
struct page *scratch_pt;
|
||||
void *scratch_pt;
|
||||
unsigned long mfn;
|
||||
int i;
|
||||
void *p;
|
||||
|
||||
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
|
||||
return -EINVAL;
|
||||
|
||||
scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
|
||||
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!scratch_pt) {
|
||||
gvt_err("fail to allocate scratch page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
p = kmap_atomic(scratch_pt);
|
||||
mfn = intel_gvt_hypervisor_virt_to_mfn(p);
|
||||
mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
|
||||
if (mfn == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
|
||||
kunmap_atomic(p);
|
||||
__free_page(scratch_pt);
|
||||
gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
|
||||
free_page((unsigned long)scratch_pt);
|
||||
return -EFAULT;
|
||||
}
|
||||
gtt->scratch_pt[type].page_mfn = mfn;
|
||||
gtt->scratch_pt[type].page = scratch_pt;
|
||||
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
|
||||
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
|
||||
vgpu->id, type, mfn);
|
||||
|
||||
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||
* scratch_pt[type] indicate the scratch pt/scratch page used by the
|
||||
* 'type' pt.
|
||||
* e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
|
||||
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
|
||||
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
|
||||
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
|
||||
*/
|
||||
if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
|
||||
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
|
||||
se.val64 |= PPAT_CACHED_INDEX;
|
||||
|
||||
for (i = 0; i < page_entry_num; i++)
|
||||
ops->set_entry(p, &se, i, false, 0, vgpu);
|
||||
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
|
||||
}
|
||||
|
||||
kunmap_atomic(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
int intel_gvt_init_gtt(struct intel_gvt *gvt)
|
||||
{
|
||||
int ret;
|
||||
void *page_addr;
|
||||
void *page;
|
||||
|
||||
gvt_dbg_core("init gtt\n");
|
||||
|
||||
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
gvt->gtt.scratch_ggtt_page =
|
||||
alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
|
||||
if (!gvt->gtt.scratch_ggtt_page) {
|
||||
page = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!page) {
|
||||
gvt_err("fail to allocate scratch ggtt page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
gvt->gtt.scratch_ggtt_page = virt_to_page(page);
|
||||
|
||||
page_addr = page_address(gvt->gtt.scratch_ggtt_page);
|
||||
|
||||
gvt->gtt.scratch_ggtt_mfn =
|
||||
intel_gvt_hypervisor_virt_to_mfn(page_addr);
|
||||
gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
|
||||
if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("fail to translate scratch ggtt page\n");
|
||||
__free_page(gvt->gtt.scratch_ggtt_page);
|
||||
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
|
||||
for (offset = 0; offset < num_entries; offset++)
|
||||
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reset_gtt - reset the all GTT related status
|
||||
* @vgpu: a vGPU
|
||||
* @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
|
||||
*
|
||||
* This function is called from vfio core to reset all
* GTT related status, including GGTT, PPGTT, scratch page.
*
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
{
int i;

ppgtt_free_all_shadow_page(vgpu);
if (!dmlr)
return;

intel_vgpu_reset_ggtt(vgpu);

/* clear scratch page for security */
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL)
memset(page_address(vgpu->gtt.scratch_pt[i].page),
0, PAGE_SIZE);
}
}

@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,

@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_clean_vgpu_types(gvt);

idr_destroy(&gvt->vgpu_idr);

kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

gvt_dbg_core("init gvt device\n");

idr_init(&gvt->vgpu_idr);

mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;

@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
return ret;
goto out_clean_idr;

ret = intel_gvt_load_firmware(gvt);
if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
return ret;
}

@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);

@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {

@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
void *read, void *write)
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
default:
/*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset);
return 1;
return -EINVAL;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}

static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;

vgpu->resetting = true;

intel_vgpu_stop_schedule(vgpu);
/*
* The current_vgpu will set to NULL after stopping the
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}

intel_vgpu_reset_execlist(vgpu, bitmap);

/* full GPU reset */
if (bitmap == 0xff) {
mutex_unlock(&vgpu->gvt->lock);
intel_vgpu_clean_gtt(vgpu);
mutex_lock(&vgpu->gvt->lock);
setup_vgpu_mmio(vgpu);
populate_pvinfo_page(vgpu);
intel_vgpu_init_gtt(vgpu);
}

vgpu->resetting = false;

return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
void *p_data, unsigned int bytes)
{
unsigned int engine_mask = 0;
u32 data;
u64 bitmap = 0;

write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);

if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
bitmap = 0xff;
engine_mask = ALL_ENGINES;
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
engine_mask |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
engine_mask |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
engine_mask |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
engine_mask |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
engine_mask |= (1 << VCS2);
}
}
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
bitmap |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
bitmap |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
bitmap |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
bitmap |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
}
return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);

intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}

static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
int rc = 0;
unsigned int id = 0;

write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
id = VECS;
break;
default:
rc = -EINVAL;
break;
return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);

return rc;
return 0;
}

static int ring_reset_ctl_write(struct intel_vgpu *vgpu,

@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
int ret;

pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
if (!type) {
gvt_err("failed to find type %s to create\n",
kobject_name(kobj));
return -EINVAL;
ret = -EINVAL;
goto out;
}

vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
gvt_err("create intel vgpu failed\n");
return -EINVAL;
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret);
goto out;
}

INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)

gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
ret = 0;

out:
return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)

@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;

mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);

if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}

if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}

mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
} else
} else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

if (!vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);

if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
}

if (ret)
goto err;

@@ -302,3 +303,56 @@ err:
mutex_unlock(&gvt->lock);
return ret;
}

/**
* intel_vgpu_reset_mmio - reset virtual MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;

memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
}

/**
* intel_vgpu_init_mmio - init MMIO space
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;

vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

intel_vgpu_reset_mmio(vgpu);

return 0;
}

/**
* intel_vgpu_clean_mmio - clean MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
*offset; \
})

int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,

@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
vgpu->id))
return -EINVAL;

vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
GFP_DMA32 | __GFP_ZERO,
INTEL_GVT_OPREGION_PORDER);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO,
get_order(INTEL_GVT_OPREGION_SIZE));

if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
INTEL_GVT_OPREGION_PORDER);
get_order(INTEL_GVT_OPREGION_SIZE));

vgpu_opregion(vgpu)->va = NULL;
}

@@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204

#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_PORDER 1
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)

#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)

@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
struct intel_vgpu *vgpu;
int event;

mutex_lock(&gvt->lock);

workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;

if (!workload->status && !workload->vgpu->resetting) {
if (!workload->status && !vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));

@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(workload->vgpu,
event);
intel_vgpu_trigger_virtual_event(vgpu, event);
}

gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

scheduler->current_workload[ring_id] = NULL;

atomic_dec(&workload->vgpu->running_workload_num);

list_del_init(&workload->list);
workload->complete(workload);

atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
@@ -459,11 +459,11 @@ complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);

complete_current_workload(gvt, ring_id);

if (workload->req)
i915_gem_request_put(fetch_and_zero(&workload->req));

complete_current_workload(gvt, ring_id);

if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);

@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"

static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;

if (vgpu->mmio.vreg)
memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
else {
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;
}

vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
return 0;
}

static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;

memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);

if (!param->primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}

/* Show guest that there isn't any stolen memory.*/
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);

vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
* Clear the bar upper 32bit and let guest to assign the new value
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
if (low_avail / min_low == 0)
break;
gvt->types[i].low_gm_size = min_low;
gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
gvt->types[i].max_instance = low_avail / min_low;
gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
clean_vgpu_mmio(vgpu);
intel_vgpu_clean_mmio(vgpu);
vfree(vgpu);

intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

setup_vgpu_cfg_space(vgpu, param);
intel_vgpu_init_cfg_space(vgpu, param->primary);

ret = setup_vgpu_mmio(vgpu);
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_free_vgpu;
goto out_clean_idr;

ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
clean_vgpu_mmio(vgpu);
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}

/**
* intel_gvt_reset_vgpu - reset a virtual GPU
* intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
* @vgpu: virtual GPU
* @dmlr: vGPU Device Model Level Reset or GT Reset
* @engine_mask: engines to reset for GT reset
*
* This function is called when user wants to reset a virtual GPU through
* device model reset or GT reset. The caller should hold the gvt lock.
*
* vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
* the whole vGPU to default state as when it is created. This vGPU function
* is required both for functionality and security concerns. The ultimate goal
* of vGPU FLR is to reuse a vGPU instance across virtual machines. When we
* assign a vGPU to a virtual machine we must issue such a reset first.
*
* Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
* (Render, Blitter, Video, Video Enhancement). It is defined by the GPU Spec.
* Unlike the FLR, a GT reset only resets particular resources of a vGPU per
* the reset request. The guest driver can issue a GT reset by programming the
* virtual GDRST register to reset a specific virtual GPU engine or all
* engines.
*
* The parameter dmlr identifies whether we will do a DMLR or a GT reset.
* The parameter engine_mask specifies the engines that need to be
* reset. If the value ALL_ENGINES is given for engine_mask, it means
* the caller requests a full GT reset, i.e. we will reset all virtual
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
vgpu->resetting = true;

intel_vgpu_stop_schedule(vgpu);
/*
* The current_vgpu will be set to NULL after stopping the
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
}

intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_reset_gtt(vgpu, dmlr);
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);
populate_pvinfo_page(vgpu);

if (dmlr)
intel_vgpu_reset_cfg_space(vgpu);
}

vgpu->resetting = false;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}

/**
* intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
* @vgpu: virtual GPU
*
* This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
mutex_lock(&vgpu->gvt->lock);
intel_gvt_reset_vgpu_locked(vgpu, true, 0);
mutex_unlock(&vgpu->gvt->lock);
}

@@ -2966,6 +2966,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;

if (!plane_state->base.visible)
return 0;

/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,

@@ -970,18 +970,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *batch,
uint32_t index)
{
struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

/*
* WaDisableLSQCROPERFforOCL:kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);

@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);

/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:kbl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);

/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,

@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
struct drm_rect clip = { 0, };

if (!state->crtc)
return 0;

crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);

@@ -38,6 +38,11 @@
* - TV Panel encoding via ENCT
*/

/* HHI Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */

struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
.mode_tag = MESON_VENC_MODE_CVBS_PAL,
.hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)

void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);

/* Power Down Dacs */
writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));

/* Disable HDMI PHY */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);

/* Disable HDMI */
writel_bits_relaxed(0x3, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));

/* Disable all encoders */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));

@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)

/* Disable CVBS VDAC */
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}

static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)

@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
uint32_t wptr;

/*
* Mask wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);

/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;

adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}

mmu = gpu->aspace->mmu;
if (mmu) {
if (gpu->aspace && gpu->aspace->mmu) {
struct msm_mmu *mmu = gpu->aspace->mmu;
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)

@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_plane *plane;
struct drm_plane_state *plane_state;

for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);

if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);

@@ -104,8 +104,6 @@ struct mdp5_plane_state {

/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;

bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);

uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);

@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
drm_printf(p, "\tpending=%u\n", pstate->pending);
}

static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);

mdp5_state->pending = false;

return &mdp5_state->base;
}

@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));

/* We don't allow faster-than-vblank updates.. if we did add this
* some day, we would need to disallow in cases where hwpipe
* changes
*/
if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
return -EBUSY;

max_width = config->hw->lm.max_width << 16;
max_height = config->hw->lm.max_height << 16;

@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);

DBG("%s: update", plane->name);

mdp5_state->pending = true;

if (plane_enabled(state)) {
int ret;

@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return pstate->hwpipe->flush_mask;
}

/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

pstate->pending = false;
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{

@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));

for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
if (!priv->aspace[id])
continue;
msm_gem_unmap_vma(priv->aspace[id],
&msm_obj->domain[id], msm_obj->sgt);
}

@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}

if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
int ret;
int ret = 0;

if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob

ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
return -EFAULT;
goto out;

if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
return -EINVAL;
ret = -EINVAL;
goto out;
}

/* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
return -EINVAL;
ret = -EINVAL;
goto out;
}

ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
return ret;
goto out;

if (valid)
continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}

out:
msm_gem_put_vaddr_locked(&obj->base);

return 0;
return ret;
}

static void submit_cleanup(struct msm_gem_submit *submit)

@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
struct msm_ringbuffer *ring;
int ret;

size = ALIGN(size, 4); /* size should be dword aligned */
if (WARN_ON(!is_power_of_2(size)))
return ERR_PTR(-EINVAL);

ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {

@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");

MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

MODULE_FIRMWARE("radeon/si58_mc.bin");

static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev)
int err;
int new_fw = 0;
bool new_smc = false;
bool si58_fw = false;
bool banks2_fw = false;

DRM_DEBUG("\n");

switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
/* XXX: figure out which Tahitis need the new ucode */
if (0)
new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->device == 0x6810) ||
(rdev->pdev->device == 0x6811) ||
(rdev->pdev->device == 0x6816) ||
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
if ((rdev->pdev->revision == 0x81) &&
((rdev->pdev->device == 0x6810) ||
(rdev->pdev->device == 0x6811)))
new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6820) ||
(rdev->pdev->device == 0x6821) ||
(rdev->pdev->device == 0x6822) ||
(rdev->pdev->device == 0x6823) ||
(rdev->pdev->device == 0x682A) ||
(rdev->pdev->device == 0x682B))
if (((rdev->pdev->device == 0x6820) &&
((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83))) ||
((rdev->pdev->device == 0x6821) &&
((rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87))) ||
((rdev->pdev->revision == 0x87) &&
((rdev->pdev->device == 0x6823) ||
(rdev->pdev->device == 0x682b))))
new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605))
if (((rdev->pdev->revision == 0x81) &&
((rdev->pdev->device == 0x6600) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605) ||
(rdev->pdev->device == 0x6610))) ||
((rdev->pdev->revision == 0x83) &&
(rdev->pdev->device == 0x6610)))
new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0xC3) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667))
if (((rdev->pdev->revision == 0x81) &&
(rdev->pdev->device == 0x6660)) ||
((rdev->pdev->revision == 0x83) &&
((rdev->pdev->device == 0x6660) ||
(rdev->pdev->device == 0x6663) ||
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667))))
new_smc = true;
else if ((rdev->pdev->revision == 0xc3) &&
(rdev->pdev->device == 0x6665))
banks2_fw = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}

/* this memory configuration requires special firmware */
if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
si58_fw = true;

DRM_INFO("Loading %s Microcode\n", new_chip_name);

snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
if (si58_fw)
snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}

if (new_smc)
if (banks2_fw)
snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
else if (new_smc)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);

@@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
} else if (rdev->family == CHIP_VERDE) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6820) ||
(rdev->pdev->device == 0x6821) ||
(rdev->pdev->device == 0x6822) ||
(rdev->pdev->device == 0x6823) ||
(rdev->pdev->device == 0x682A) ||
(rdev->pdev->device == 0x682B)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||
(rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6604) ||
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
@@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
max_mclk = 80000;
}
}
/* Apply dpm quirks */

@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
uint32_t stat;
uint32_t stat, reg;

stat = tilcdc_read_irqstatus(dev);
tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
__func__, stat);
tilcdc_crtc->frame_intact = false;
if (tilcdc_crtc->sync_lost_count++ >
SYNC_LOST_COUNT_LIMIT) {
dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
queue_work(system_wq, &tilcdc_crtc->recover_work);
if (priv->rev == 1)
if (priv->rev == 1) {
reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
if (reg & LCDC_RASTER_ENABLE) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
LCDC_V1_SYNC_LOST_INT_ENA);
else
LCDC_RASTER_ENABLE);
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
LCDC_RASTER_ENABLE);
}
} else {
if (tilcdc_crtc->sync_lost_count++ >
SYNC_LOST_COUNT_LIMIT) {
dev_err(dev->dev,
"%s(0x%08x): Sync lost flood detected, recovering",
__func__, stat);
queue_work(system_wq,
&tilcdc_crtc->recover_work);
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
tilcdc_crtc->sync_lost_count = 0;
tilcdc_crtc->sync_lost_count = 0;
}
}
}

@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;

info->screen_base = obj->vmap;
info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,