Merge drm/drm-next into drm-intel-next-queued
Although there's nothing crucial missing, it's been a long time since the last backmerge. Catch up with drm-next.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
 
-		switch (info->drm_format_mod << 10) {
-		case PLANE_CTL_TILED_LINEAR:
+		switch (info->drm_format_mod) {
+		case DRM_FORMAT_MOD_LINEAR:
 			tiling_mode = I915_TILING_NONE;
 			break;
-		case PLANE_CTL_TILED_X:
+		case I915_FORMAT_MOD_X_TILED:
 			tiling_mode = I915_TILING_X;
 			stride = info->stride;
 			break;
-		case PLANE_CTL_TILED_Y:
+		case I915_FORMAT_MOD_Y_TILED:
+		case I915_FORMAT_MOD_Yf_TILED:
 			tiling_mode = I915_TILING_Y;
 			stride = info->stride;
 			break;
 		default:
-			gvt_dbg_core("not supported tiling mode\n");
+			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+				     info->drm_format_mod);
 		}
 		obj->tiling_and_stride = tiling_mode | stride;
 	} else {
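Note on the hunk above: obj->tiling_and_stride can carry both values in one word because fence-capable strides are at least 128 bytes and 128-aligned, leaving the low bits free for the tiling mode. A minimal sketch of how the field is unpacked, assuming the TILING_MASK and STRIDE_MASK definitions that sit beside tiling_and_stride in i915; the sketch_ helpers are hypothetical:

/* Sketch, not part of the patch: unpacking tiling_and_stride.
 * Assumes TILING_MASK == FENCE_MINIMUM_STRIDE - 1 and
 * STRIDE_MASK == ~TILING_MASK, as defined next to the field. */
static unsigned int sketch_get_tiling(unsigned int tiling_and_stride)
{
	return tiling_and_stride & TILING_MASK;
}

static unsigned int sketch_get_stride(unsigned int tiling_and_stride)
{
	return tiling_and_stride & STRIDE_MASK;
}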
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		info->height = p.height;
 		info->stride = p.stride;
 		info->drm_format = p.drm_format;
-		info->drm_format_mod = p.tiled;
+
+		switch (p.tiled) {
+		case PLANE_CTL_TILED_LINEAR:
+			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+			break;
+		case PLANE_CTL_TILED_X:
+			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+			break;
+		case PLANE_CTL_TILED_Y:
+			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+			break;
+		case PLANE_CTL_TILED_YF:
+			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+			break;
+		default:
+			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+		}
+
 		info->size = (((p.stride * p.height * p.bpp) / 8) +
-				(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
 		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
 		if (ret)
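The two hunks above are two halves of the same fix: vgpu_get_plane_info() now translates the raw PLANE_CTL tiling bits into real DRM format modifiers, and vgpu_create_gem() consumes modifiers instead of shifted register bits. The translation as a standalone helper, for reference (a sketch; the sketch_ name is hypothetical, the case labels are the macros used above):

/* Sketch: PLANE_CTL tiling bits -> DRM format modifier, as done
 * inline in the hunk above. DRM_FORMAT_MOD_INVALID flags bad input. */
static u64 sketch_tiled_to_modifier(u32 tiled)
{
	switch (tiled) {
	case PLANE_CTL_TILED_LINEAR:	return DRM_FORMAT_MOD_LINEAR;
	case PLANE_CTL_TILED_X:		return I915_FORMAT_MOD_X_TILED;
	case PLANE_CTL_TILED_Y:		return I915_FORMAT_MOD_Y_TILED;
	case PLANE_CTL_TILED_YF:	return I915_FORMAT_MOD_Yf_TILED;
	default:			return DRM_FORMAT_MOD_INVALID;
	}
}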
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_BROXTON(dev_priv)) {
-		plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-				_PLANE_CTL_TILED_SHIFT;
+		plane->tiled = val & PLANE_CTL_TILED_MASK;
 		fmt = skl_format_to_drm(
 			val & PLANE_CTL_FORMAT_MASK,
 			val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
 		(IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
 	u8	enabled;	/* plane is enabled */
-	u8	tiled;		/* X-tiled */
+	u32	tiled;		/* tiling mode: linear, X-tiled, Y-tiled, etc. */
 	u8	bpp;		/* bits per pixel */
 	u32	hw_format;	/* format field in the PRI_CTL register */
 	u32	drm_format;	/* format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1297,6 +1297,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+	else
+		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+	return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
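gen9_dbuf_ctl_mmio_write() completes the DBUF power request/state handshake instantly in the virtual register, so a guest that sets the request bit and polls the state bit never spins. Roughly the guest-side sequence being satisfied (a sketch; dbuf_ctl_reg is a hypothetical mapped-register pointer standing in for the guest driver's real accessors):

/* Guest-side view of the handshake emulated above (sketch). */
u32 val = readl(dbuf_ctl_reg);
writel(val | DBUF_POWER_REQUEST, dbuf_ctl_reg);
while (!(readl(dbuf_ctl_reg) & DBUF_POWER_STATE))
	cpu_relax();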
@@ -1526,9 +1539,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
 	u32 v = *(u32 *)p_data;
 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+	switch (offset) {
+	case _PHY_CTL_FAMILY_EDP:
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+		break;
+	case _PHY_CTL_FAMILY_DDI:
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+		break;
+	}
 
 	vgpu_vreg(vgpu, offset) = v;
@@ -2801,6 +2820,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
 
+	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
 	MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2976,8 +2997,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 		NULL, gen9_trtte_write);
 	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-	MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3014,7 +3033,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
 	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-		NULL, NULL);
+		 NULL, NULL);
+	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		 NULL, NULL);
 
 	MMIO_D(_MMIO(0x4ab8), D_KBL);
 	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
@@ -3178,6 +3199,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
 
 	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+	MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
 
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 	info = (struct kvmgt_guest_info *)handle;
 	kvm = info->kvm;
 
-	if (kthread)
+	if (kthread) {
+		if (!mmget_not_zero(kvm->mm))
+			return -EFAULT;
 		use_mm(kvm->mm);
+	}
 
 	idx = srcu_read_lock(&kvm->srcu);
 	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
 		      kvm_read_guest(kvm, gpa, buf, len);
 	srcu_read_unlock(&kvm->srcu, idx);
 
-	if (kthread)
+	if (kthread) {
 		unuse_mm(kvm->mm);
+		mmput(kvm->mm);
+	}
 
 	return ret;
 }
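The kvmgt_rw_gpa() change closes a race: a kthread may call this while the guest's mm is being torn down, so the mm must be pinned with mmget_not_zero() before use_mm() and released with mmput() after unuse_mm(). The general pattern, isolated as a self-contained sketch (the helper name and callback are hypothetical):

/* Sketch: a kthread safely borrowing a user mm. */
static int sketch_kthread_with_mm(struct mm_struct *mm,
				  void (*fn)(void *), void *data)
{
	if (!mmget_not_zero(mm))	/* fails if mm is already exiting */
		return -EFAULT;
	use_mm(mm);			/* adopt the mm for this kthread */
	fn(data);			/* ... user-address work here ... */
	unuse_mm(mm);
	mmput(mm);			/* drop the reference taken above */
	return 0;
}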
@@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 {
 	struct kvmgt_guest_info *info;
 	struct kvm *kvm;
+	int idx;
+	bool ret;
 
 	if (!handle_valid(handle))
 		return false;
@@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 	info = (struct kvmgt_guest_info *)handle;
 	kvm = info->kvm;
 
-	return kvm_is_visible_gfn(kvm, gfn);
+	idx = srcu_read_lock(&kvm->srcu);
+	ret = kvm_is_visible_gfn(kvm, gfn);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	return ret;
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
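Both kvmgt hunks wrap memslot lookups in kvm->srcu read-side sections: kvm_is_visible_gfn() dereferences the memslot array, which KVM protects with sleepable RCU rather than a spinlock. The idiom on its own (a sketch):

/* Sketch: the SRCU read-side critical section used above. */
int idx;
bool visible;

idx = srcu_read_lock(&kvm->srcu);	/* pin the current memslots */
visible = kvm_is_visible_gfn(kvm, gfn);
srcu_read_unlock(&kvm->srcu, idx);	/* must pass back the same idx */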
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -245,6 +245,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 
 		/* set the bit 0:2(Core C-State ) to C0 */
 		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+		if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+				    ~(BIT(0) | BIT(1));
+			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+				    ~PHY_POWER_GOOD;
+			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+				    ~PHY_POWER_GOOD;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+				    ~BIT(30);
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+				    ~BIT(30);
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+				    ~BXT_PHY_LANE_ENABLED;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+				    BXT_PHY_LANE_POWERDOWN_ACK;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+				    ~BXT_PHY_LANE_ENABLED;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+				    BXT_PHY_LANE_POWERDOWN_ACK;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+				    ~BXT_PHY_LANE_ENABLED;
+			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
+				    BXT_PHY_LANE_POWERDOWN_ACK;
+		}
 	} else {
 #define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
 	/* only reset the engine related, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -549,11 +549,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performance for batch mmio read/write, so we need to
 	 * handle forcewake manually.
 	 */
-	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
 }
 
 /**
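This hunk pairs with the intel_vgpu_stop_schedule() hunks further down: the runtime-PM get/put moves out of the per-context-switch path in intel_gvt_switch_mmio() and into the vGPU stop path, where it brackets the engine-owner cleanup. Both sites preserve the same invariant, the balanced bracket around hardware access (a minimal sketch):

/* Sketch: balanced runtime-PM bracket around register access. */
intel_runtime_pm_get(dev_priv);		/* wake device, hold a reference */
/* ... touch forcewake-protected registers ... */
intel_runtime_pm_put(dev_priv);		/* drop reference; allow suspend */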
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3	0x20
 #define DEVICE_TYPE_EFP4	0x10
 
-#define DEV_SIZE	38
-
 struct opregion_header {
 	u8 signature[16];
 	u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
 	u16 size; /* data size */
 } __packed;
 
+/* To support a Windows guest with opregion, hardcode the emulated
+ * bdb header version as '186'; the corresponding child_device_config
+ * length should then be '33', not '38'.
+ */
 struct efp_child_device_config {
 	u16 handle;
 	u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
 	u8 mipi_bridge_type; /* 171 */
 	u16 device_class_ext;
 	u8 dvo_function;
-	u8 dp_usb_type_c:1; /* 195 */
-	u8 skip6:7;
-	u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-	u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-	u8 iboost_dp:4; /* 196 */
-	u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
 	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
 	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-	v->bdb_header.version = 186; /* child_dev_size = 38 */
+	v->bdb_header.version = 186; /* child_dev_size = 33 */
 	v->bdb_header.header_size = sizeof(v->bdb_header);
 
 	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
 	/* child device */
 	num_child = 4; /* each port has one child */
+	v->general_definitions.child_dev_size =
+			sizeof(struct efp_child_device_config);
 	v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
 	/* size will include child devices */
 	v->general_definitions_header.size =
-		sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-	v->general_definitions.child_dev_size = DEV_SIZE;
+		sizeof(struct bdb_general_definitions) +
+			num_child * v->general_definitions.child_dev_size;
 
 	/* portA */
 	v->child0.handle = DEVICE_TYPE_EFP1;
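With DEV_SIZE gone, the emulated BDB sizes derive from struct efp_child_device_config itself; dropping the six USB type-C/iboost fields shrinks it from 38 to 33 bytes (1 + 1 + 2 + 1 bytes across the bitfields and members), matching the version-186 comment. A compile-time guard could pin that expectation (a sketch, not in the patch; BUILD_BUG_ON must sit inside a function):

/* Sketch: pin the emulated child device config at the 33 bytes
 * that the hardcoded BDB version 186 expects. */
BUILD_BUG_ON(sizeof(struct efp_child_device_config) != 33);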
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME 2
+
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
 	bool active;
-
+	bool pri_sched;
+	ktime_t pri_time;
 	ktime_t sched_in_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
+		if (vgpu_data->pri_sched) {
+			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+				vgpu = vgpu_data->vgpu;
+				break;
+			} else
+				vgpu_data->pri_sched = false;
+		}
+
 		/* Return the vGPU only if it has time slice left */
 		if (vgpu_data->left_ts > 0) {
 			vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
 
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	vgpu = find_busy_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
-
-		/* Move the last used vGPU to the tail of lru_list */
 		vgpu_data = vgpu->sched_data;
-		list_del_init(&vgpu_data->lru_list);
-		list_add_tail(&vgpu_data->lru_list,
-			      &sched_data->lru_runq_head);
+		if (!vgpu_data->pri_sched) {
+			/* Move the last used vGPU to the tail of lru_list */
+			list_del_init(&vgpu_data->lru_list);
+			list_add_tail(&vgpu_data->lru_list,
+				      &sched_data->lru_runq_head);
+		}
 	} else {
 		scheduler->next_vgpu = gvt->idle_vgpu;
 	}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	ktime_t now;
 
 	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+	now = ktime_get();
+	vgpu_data->pri_time = ktime_add(now,
+		ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+	vgpu_data->pri_sched = true;
+
+	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
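The scheduler hunks give a newly started vGPU a 2-second priority window: tbs_sched_start_schedule() stamps pri_time and inserts the vGPU at the head of the LRU run queue (list_add instead of list_add_tail), and find_busy_vgpu() picks it unconditionally until the window lapses. The window test, factored out for clarity (a sketch with a hypothetical helper name):

/* Sketch: the priority-window check used in find_busy_vgpu() above. */
static bool sketch_in_pri_window(struct vgpu_sched_data *vgpu_data)
{
	return vgpu_data->pri_sched &&
	       ktime_before(ktime_get(), vgpu_data->pri_time);
}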
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		&vgpu->gvt->scheduler;
 	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	if (!vgpu_data->active)
 		return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->current_vgpu = NULL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
 	spin_lock_bh(&scheduler->mmio_context_lock);
 	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
 		if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
 	intel_vgpu_clean_opregion(vgpu);
+	intel_vgpu_reset_ggtt(vgpu, true);
 	intel_vgpu_clean_gtt(vgpu);
 	intel_gvt_hypervisor_detach_vgpu(vgpu);
 	intel_vgpu_free_resource(vgpu);