Merge v5.3-rc1 into drm-misc-next
Noralf needs some SPI patches in 5.3 to merge some work on tinydrm.

Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
@@ -35,6 +35,7 @@
  */

 #include "i915_drv.h"
+#include "i915_gem_fence_reg.h"
 #include "gvt.h"

 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
@@ -128,10 +129,10 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 {
         struct intel_gvt *gvt = vgpu->gvt;
         struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct i915_fence_reg *reg;
         i915_reg_t fence_reg_lo, fence_reg_hi;

-        assert_rpm_wakelock_held(dev_priv);
+        assert_rpm_wakelock_held(&dev_priv->runtime_pm);

         if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
                 return;
@@ -163,13 +164,13 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
         struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct i915_fence_reg *reg;
         u32 i;

         if (WARN_ON(!vgpu_fence_sz(vgpu)))
                 return;

-        intel_runtime_pm_get(dev_priv);
+        intel_runtime_pm_get(&dev_priv->runtime_pm);

         mutex_lock(&dev_priv->drm.struct_mutex);
         _clear_vgpu_fence(vgpu);
@@ -180,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
         }
         mutex_unlock(&dev_priv->drm.struct_mutex);

-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
         struct intel_gvt *gvt = vgpu->gvt;
         struct drm_i915_private *dev_priv = gvt->dev_priv;
-        struct drm_i915_fence_reg *reg;
+        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+        struct i915_fence_reg *reg;
         int i;

-        intel_runtime_pm_get(dev_priv);
+        intel_runtime_pm_get(rpm);

         /* Request fences from host */
         mutex_lock(&dev_priv->drm.struct_mutex);
@@ -206,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
         _clear_vgpu_fence(vgpu);

         mutex_unlock(&dev_priv->drm.struct_mutex);
-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(rpm);
         return 0;
 out_free_fence:
         gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +221,7 @@ out_free_fence:
                 vgpu->fence.regs[i] = NULL;
         }
         mutex_unlock(&dev_priv->drm.struct_mutex);
-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(rpm);
         return -ENOSPC;
 }

@@ -315,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

-        intel_runtime_pm_get(dev_priv);
+        intel_runtime_pm_get(&dev_priv->runtime_pm);
         _clear_vgpu_fence(vgpu);
-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 /**
@@ -1725,7 +1725,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||
int ret = 0;
|
||||
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
|
||||
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
|
||||
unsigned long gma_start_offset = 0;
|
||||
unsigned long start_offset = 0;
|
||||
|
||||
/* get the start gm address of the batch buffer */
|
||||
gma = get_gma_bb_from_cmd(s, 1);
|
||||
@@ -1742,7 +1742,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||
|
||||
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
|
||||
|
||||
/* the gma_start_offset stores the batch buffer's start gma's
|
||||
/* the start_offset stores the batch buffer's start gma's
|
||||
* offset relative to page boundary. so for non-privileged batch
|
||||
* buffer, the shadowed gem object holds exactly the same page
|
||||
* layout as original gem object. This is for the convience of
|
||||
@@ -1754,16 +1754,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||
* that of shadowed page.
|
||||
*/
|
||||
if (bb->ppgtt)
|
||||
gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
|
||||
start_offset = gma & ~I915_GTT_PAGE_MASK;
|
||||
|
||||
bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
|
||||
roundup(bb_size + gma_start_offset, PAGE_SIZE));
|
||||
bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
|
||||
round_up(bb_size + start_offset,
|
||||
PAGE_SIZE));
|
||||
if (IS_ERR(bb->obj)) {
|
||||
ret = PTR_ERR(bb->obj);
|
||||
goto err_free_bb;
|
||||
}
|
||||
|
||||
ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
|
||||
ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
|
||||
if (ret)
|
||||
goto err_free_obj;
|
||||
|
||||
@@ -1780,7 +1781,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||
|
||||
ret = copy_gma_to_hva(s->vgpu, mm,
|
||||
gma, gma + bb_size,
|
||||
bb->va + gma_start_offset);
|
||||
bb->va + start_offset);
|
||||
if (ret < 0) {
|
||||
gvt_vgpu_err("fail to copy guest ring buffer\n");
|
||||
ret = -EFAULT;
|
||||
@@ -1806,13 +1807,13 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
||||
* buffer's gma in pair. After all, we don't want to pin the shadow
|
||||
* buffer here (too early).
|
||||
*/
|
||||
s->ip_va = bb->va + gma_start_offset;
|
||||
s->ip_va = bb->va + start_offset;
|
||||
s->ip_gma = gma;
|
||||
return 0;
|
||||
err_unmap:
|
||||
i915_gem_object_unpin_map(bb->obj);
|
||||
err_finish_shmem_access:
|
||||
i915_gem_obj_finish_shmem_access(bb->obj);
|
||||
i915_gem_object_finish_access(bb->obj);
|
||||
err_free_obj:
|
||||
i915_gem_object_put(bb->obj);
|
||||
err_free_bb:
|
||||
@@ -2829,9 +2830,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
int ret = 0;
|
||||
void *map;
|
||||
|
||||
obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
|
||||
roundup(ctx_size + CACHELINE_BYTES,
|
||||
PAGE_SIZE));
|
||||
obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
|
||||
roundup(ctx_size + CACHELINE_BYTES,
|
||||
PAGE_SIZE));
|
||||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
@@ -2843,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
goto put_obj;
|
||||
}
|
||||
|
||||
i915_gem_object_lock(obj);
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, false);
|
||||
i915_gem_object_unlock(obj);
|
||||
if (ret) {
|
||||
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
|
||||
goto unmap_src;
|
||||
|
@@ -58,12 +58,12 @@ static int mmio_offset_compare(void *priv,
 static inline int mmio_diff_handler(struct intel_gvt *gvt,
                                     u32 offset, void *data)
 {
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct drm_i915_private *i915 = gvt->dev_priv;
         struct mmio_diff_param *param = data;
         struct diff_mmio *node;
         u32 preg, vreg;

-        preg = I915_READ_NOTRACE(_MMIO(offset));
+        preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
         vreg = vgpu_vreg(param->vgpu, offset);

         if (preg != vreg) {
@@ -68,9 +68,10 @@ static struct bin_attribute firmware_attr = {

 static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 {
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
+        struct drm_i915_private *i915 = gvt->dev_priv;

-        *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+        *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+                                                            _MMIO(offset));
         return 0;
 }

@@ -87,7 +87,7 @@ struct intel_vgpu_gm {

 /* Fences owned by a vGPU */
 struct intel_vgpu_fence {
-        struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+        struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
         u32 base;
         u32 size;
 };
@@ -390,7 +390,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
                                    + gvt_hidden_sz(gvt) - 1)

-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)

 /* Aperture/GM space definitions for vGPU */
 #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
@@ -584,12 +584,12 @@ enum {

 static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
 {
-        intel_runtime_pm_get(dev_priv);
+        intel_runtime_pm_get(&dev_priv->runtime_pm);
 }

 static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
 {
-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }

 /**
@@ -1254,18 +1254,15 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
|
||||
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
u32 data;
|
||||
int ret;
|
||||
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
data = vgpu_vreg(vgpu, offset);
|
||||
u32 data = *(u32 *)p_data;
|
||||
bool invalid_write = false;
|
||||
|
||||
switch (offset) {
|
||||
case _vgtif_reg(display_ready):
|
||||
send_display_ready_uevent(vgpu, data ? 1 : 0);
|
||||
break;
|
||||
case _vgtif_reg(g2v_notify):
|
||||
ret = handle_g2v_notification(vgpu, data);
|
||||
handle_g2v_notification(vgpu, data);
|
||||
break;
|
||||
/* add xhot and yhot to handled list to avoid error log */
|
||||
case _vgtif_reg(cursor_x_hot):
|
||||
@@ -1282,13 +1279,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
case _vgtif_reg(execlist_context_descriptor_hi):
|
||||
break;
|
||||
case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
|
||||
invalid_write = true;
|
||||
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
|
||||
break;
|
||||
default:
|
||||
invalid_write = true;
|
||||
gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
|
||||
offset, bytes, data);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!invalid_write)
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -34,6 +34,7 @@
  */

 #include "i915_drv.h"
+#include "gt/intel_context.h"
 #include "gvt.h"
 #include "trace.h"

@@ -30,7 +30,7 @@
  * not do like this.
  */
 #define _INTEL_BIOS_PRIVATE
-#include "intel_vbt_defs.h"
+#include "display/intel_vbt_defs.h"

 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 #define MBOX_VBT (1<<3)
@@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                 scheduler->current_vgpu = NULL;
         }

-        intel_runtime_pm_get(dev_priv);
+        intel_runtime_pm_get(&dev_priv->runtime_pm);
         spin_lock_bh(&scheduler->mmio_context_lock);
         for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
                 if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                 }
         }
         spin_unlock_bh(&scheduler->mmio_context_lock);
-        intel_runtime_pm_put_unchecked(dev_priv);
+        intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
         mutex_unlock(&vgpu->gvt->sched_lock);
 }
@@ -35,8 +35,11 @@

 #include <linux/kthread.h>

+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
 #include "i915_drv.h"
-#include "i915_gem_pm.h"
 #include "gvt.h"

 #define RING_CTX_OFF(x) \
@@ -365,18 +368,20 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                                          struct i915_gem_context *ctx)
 {
         struct intel_vgpu_mm *mm = workload->shadow_mm;
-        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
         int i = 0;

         if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
                 return -EINVAL;

         if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
-                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+                px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
         } else {
                 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
-                        px_dma(ppgtt->pdp.page_directory[i]) =
-                                mm->ppgtt_mm.shadow_pdps[i];
+                        struct i915_page_directory * const pd =
+                                i915_pd_entry(ppgtt->pd, i);
+
+                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                 }
         }

@@ -482,7 +487,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                                                 bb->obj->base.size);
                                 bb->clflush &= ~CLFLUSH_AFTER;
                         }
-                        i915_gem_obj_finish_shmem_access(bb->obj);
+                        i915_gem_object_finish_access(bb->obj);
                         bb->accessing = false;

                 } else {
@@ -506,18 +511,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                         }

                         ret = i915_gem_object_set_to_gtt_domain(bb->obj,
-                                        false);
+                                                                false);
                         if (ret)
                                 goto err;

-                        i915_gem_obj_finish_shmem_access(bb->obj);
-                        bb->accessing = false;
-
                         ret = i915_vma_move_to_active(bb->vma,
                                                       workload->req,
                                                       0);
                         if (ret)
                                 goto err;
+
+                        i915_gem_object_finish_access(bb->obj);
+                        bb->accessing = false;
                 }
         }
         return 0;
@@ -588,7 +593,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
         list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                 if (bb->obj) {
                         if (bb->accessing)
-                                i915_gem_obj_finish_shmem_access(bb->obj);
+                                i915_gem_object_finish_access(bb->obj);

                         if (bb->va && !IS_ERR(bb->va))
                                 i915_gem_object_unpin_map(bb->obj);
@@ -597,7 +602,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                                 i915_vma_unpin(bb->vma);
                                 i915_vma_close(bb->vma);
                         }
-                        __i915_gem_object_release_unless_active(bb->obj);
+                        i915_gem_object_put(bb->obj);
                 }
                 list_del(&bb->list);
                 kfree(bb);
@@ -1120,16 +1125,19 @@ err:

 static void
 i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
-                                struct i915_hw_ppgtt *ppgtt)
+                                struct i915_ppgtt *ppgtt)
 {
         int i;

         if (i915_vm_is_4lvl(&ppgtt->vm)) {
-                px_dma(&ppgtt->pml4) = s->i915_context_pml4;
+                px_dma(ppgtt->pd) = s->i915_context_pml4;
         } else {
-                for (i = 0; i < GEN8_3LVL_PDPES; i++)
-                        px_dma(ppgtt->pdp.page_directory[i]) =
-                                s->i915_context_pdps[i];
+                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+                        struct i915_page_directory * const pd =
+                                i915_pd_entry(ppgtt->pd, i);
+
+                        px_dma(pd) = s->i915_context_pdps[i];
+                }
         }
 }

@@ -1148,7 +1156,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)

         intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);

-        i915_context_ppgtt_root_restore(s, s->shadow[0]->gem_context->ppgtt);
+        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
         for_each_engine(engine, vgpu->gvt->dev_priv, id)
                 intel_context_unpin(s->shadow[id]);

@@ -1178,16 +1186,19 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,

 static void
 i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
-                             struct i915_hw_ppgtt *ppgtt)
+                             struct i915_ppgtt *ppgtt)
 {
         int i;

         if (i915_vm_is_4lvl(&ppgtt->vm)) {
-                s->i915_context_pml4 = px_dma(&ppgtt->pml4);
+                s->i915_context_pml4 = px_dma(ppgtt->pd);
         } else {
-                for (i = 0; i < GEN8_3LVL_PDPES; i++)
-                        s->i915_context_pdps[i] =
-                                px_dma(ppgtt->pdp.page_directory[i]);
+                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+                        struct i915_page_directory * const pd =
+                                i915_pd_entry(ppgtt->pd, i);
+
+                        s->i915_context_pdps[i] = px_dma(pd);
+                }
         }
 }

@@ -1213,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
         if (IS_ERR(ctx))
                 return PTR_ERR(ctx);

-        i915_context_ppgtt_root_save(s, ctx->ppgtt);
+        i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));

         for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                 struct intel_context *ce;
@@ -1256,7 +1267,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
         return 0;

 out_shadow_ctx:
-        i915_context_ppgtt_root_restore(s, ctx->ppgtt);
+        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
         for_each_engine(engine, vgpu->gvt->dev_priv, i) {
                 if (IS_ERR(s->shadow[i]))
                         break;
@@ -1523,11 +1534,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
          * as there is only one pre-allocated buf-obj for shadow.
          */
         if (list_empty(workload_q_head(vgpu, ring_id))) {
-                intel_runtime_pm_get(dev_priv);
+                intel_runtime_pm_get(&dev_priv->runtime_pm);
                 mutex_lock(&dev_priv->drm.struct_mutex);
                 ret = intel_gvt_scan_and_shadow_workload(workload);
                 mutex_unlock(&dev_priv->drm.struct_mutex);
-                intel_runtime_pm_put_unchecked(dev_priv);
+                intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
         }

         if (ret) {