Merge tag 'gvt-next-2017-06-08' of https://github.com/01org/gvt-linux into drm-intel-next-queued
gvt-next-2017-06-08

First gvt-next pull for 4.13:
- optimization for per-VM mmio save/restore (Changbin)
- optimization for mmio hash table (Changbin)
- scheduler optimization with event (Ping)
- vGPU reset refinement (Fred)
- other misc refactor and cleanups, etc.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170608093547.bjgs436e3iokrzdm@zhen-hp.sh.intel.com
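The mmio hash table optimization in this pull packs the per-register attribute flags into one byte per 4-byte register: note the u32 -> u8 flags change, the vzalloc size of mmio_size / 4, and the mmio_attribute[offset >> 2] indexing in the diff below. A minimal user-space sketch of that packing, assuming illustrative names and a 2 MB MMIO region (not the kernel code itself):

/*
 * Standalone sketch: one byte of attribute flags per 4-byte MMIO
 * register, indexed by offset >> 2. Names and the 2 MB size are
 * assumptions for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define F_RO		(1 << 0)	/* register contains RO bits */
#define F_MODE_MASK	(1 << 2)	/* high 16 bits act as write mask */
#define F_CMD_ACCESS	(1 << 3)	/* accessible from GPU commands */

#define MMIO_SIZE	(2 * 1024 * 1024)	/* assumed MMIO BAR size */

static unsigned char *mmio_attribute;

static void set_flags(unsigned int offset, unsigned char flags)
{
	/* one byte per 4-byte register */
	mmio_attribute[offset >> 2] |= flags;
}

static int has_mode_mask(unsigned int offset)
{
	return mmio_attribute[offset >> 2] & F_MODE_MASK;
}

int main(void)
{
	/* mmio_size / 4 registers, one byte each */
	mmio_attribute = calloc(MMIO_SIZE / 4, 1);
	if (!mmio_attribute)
		return 1;

	set_flags(0x229c, F_MODE_MASK | F_CMD_ACCESS);
	printf("0x229c has mode mask: %d\n", has_mode_mask(0x229c));

	free(mmio_attribute);
	return 0;
}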
@@ -47,21 +47,6 @@
 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
 #define PCH_PP_DIVISOR _MMIO(0xc7210)
 
-/* Register contains RO bits */
-#define F_RO (1 << 0)
-/* Register contains graphics address */
-#define F_GMADR (1 << 1)
-/* Mode mask registers with high 16 bits as the mask bits */
-#define F_MODE_MASK (1 << 2)
-/* This reg can be accessed by GPU commands */
-#define F_CMD_ACCESS (1 << 3)
-/* This reg has been accessed by a VM */
-#define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED (1 << 5)
-/* This reg could be accessed by unaligned address */
-#define F_UNALIGN (1 << 6)
-
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 {
 	if (IS_BROADWELL(gvt->dev_priv))
@@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
 }
 
+static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+						  unsigned int offset)
+{
+	struct intel_gvt_mmio_info *e;
+
+	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+		if (e->offset == offset)
+			return e;
+	}
+	return NULL;
+}
+
 static int new_mmio_info(struct intel_gvt *gvt,
-		u32 offset, u32 flags, u32 size,
+		u32 offset, u8 flags, u32 size,
 		u32 addr_mask, u32 ro_mask, u32 device,
-		int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
-		int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
+		gvt_mmio_func read, gvt_mmio_func write)
 {
 	struct intel_gvt_mmio_info *info, *p;
 	u32 start, end, i;
@@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
 			return -ENOMEM;
 
 		info->offset = i;
-		p = intel_gvt_find_mmio_info(gvt, info->offset);
+		p = find_mmio_info(gvt, info->offset);
 		if (p)
 			gvt_err("dup mmio definition offset %x\n",
 				info->offset);
-		info->size = size;
-		info->length = (i + 4) < end ? 4 : (end - i);
-		info->addr_mask = addr_mask;
+
 		info->ro_mask = ro_mask;
 		info->device = device;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
@@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
 		INIT_HLIST_NODE(&info->node);
 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+		gvt->mmio.num_tracked_mmio++;
 	}
 	return 0;
 }
@@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	unsigned int fence_num = offset_to_fence_num(off);
 	int ret;
 
@@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		return ret;
 	write_vreg(vgpu, off, p_data, bytes);
 
+	mmio_hw_access_pre(dev_priv);
 	intel_vgpu_write_fence(vgpu, fence_num,
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+	mmio_hw_access_post(dev_priv);
 	return 0;
 }
 
@@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
+	/* sw will wait for the device to ack the reset request */
+	vgpu_vreg(vgpu, offset) = 0;
+
 	return 0;
 }
 
@@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	/* TRTTE is not per-context */
+
+	mmio_hw_access_pre(dev_priv);
 	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+	mmio_hw_access_post(dev_priv);
 
 	return 0;
 }
@@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	if (val & 1) {
 		/* unblock hw logic */
+		mmio_hw_access_pre(dev_priv);
 		I915_WRITE(_MMIO(offset), val);
+		mmio_hw_access_post(dev_priv);
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
@@ -1405,7 +1411,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
+	mmio_hw_access_pre(dev_priv);
 	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
+static int instdone_mmio_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	mmio_hw_access_pre(dev_priv);
+	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
@@ -1424,7 +1443,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
-		vgpu->last_ctx_submit_time = ktime_get();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 			gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -1593,6 +1611,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
+#define RING_REG(base) (base + 0x6c)
+	MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
+	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+#undef RING_REG
+	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+
 	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
@@ -1768,10 +1792,6 @@
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
 
-	MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
@@ -2176,7 +2196,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
-	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
@@ -2208,22 +2228,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
-	MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
 
 	MMIO_D(RSTDBYCTL, D_ALL);
 
 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
-	MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
 
-	MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(TILECTL, D_ALL);
 
 	MMIO_D(GEN6_UCGCTL1, D_ALL);
@@ -2231,7 +2248,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2310,14 +2326,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 
 	MMIO_D(0x44070, D_ALL);
-	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
 	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2754,7 +2769,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x72380, D_SKL_PLUS);
 	MMIO_D(0x7039c, D_SKL_PLUS);
 
-	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x8f074, D_SKL | D_KBL);
 	MMIO_D(0x8f004, D_SKL | D_KBL);
 	MMIO_D(0x8f034, D_SKL | D_KBL);
@@ -2828,26 +2842,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/**
- * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
- * @gvt: GVT device
- * @offset: register offset
- *
- * This function is used to find the MMIO information entry from hash table
- *
- * Returns:
- * pointer to MMIO information entry, NULL if not exists
- */
-struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
-	unsigned int offset)
+/* Special MMIO blocks. */
+static struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+} gvt_mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
+static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
+					      unsigned int offset)
 {
-	struct intel_gvt_mmio_info *e;
+	unsigned long device = intel_gvt_get_device_type(gvt);
+	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	int i;
 
-	WARN_ON(!IS_ALIGNED(offset, 4));
-
-	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
-		if (e->offset == offset)
-			return e;
+	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+		if (!(device & block->device))
+			continue;
+		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
+		    offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+			return block;
 	}
 	return NULL;
 }
@@ -2887,9 +2911,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
 	int ret;
 
-	gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+	gvt->mmio.mmio_attribute = vzalloc(size);
 	if (!gvt->mmio.mmio_attribute)
 		return -ENOMEM;
 
@@ -2910,77 +2935,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		if (ret)
 			goto err;
 	}
+
+	gvt_dbg_mmio("traced %u virtual mmio registers\n",
+		     gvt->mmio.num_tracked_mmio);
 	return 0;
 err:
 	intel_gvt_clean_mmio_info(gvt);
 	return ret;
 }
 
-/**
- * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_CMD_ACCESS;
-}
-
-/**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_UNALIGN;
-}
-
-/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_CMD_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
- * @gvt: a GVT device
- * @offset: register offset
- *
- * Returns:
- * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
- *
- */
-bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_MODE_MASK;
-}
 
 /**
  * intel_vgpu_default_mmio_read - default MMIO read handler
@@ -3032,3 +2995,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 {
 	return in_whitelist(offset);
 }
+
+/**
+ * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
+ * @vgpu: a vGPU
+ * @offset: register offset
+ * @pdata: data buffer
+ * @bytes: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
+		void *pdata, unsigned int bytes, bool is_read)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_mmio_info *mmio_info;
+	struct gvt_mmio_block *mmio_block;
+	gvt_mmio_func func;
+	int ret;
+
+	if (WARN_ON(bytes > 4))
+		return -EINVAL;
+
+	/*
+	 * Handle special MMIO blocks.
+	 */
+	mmio_block = find_mmio_block(gvt, offset);
+	if (mmio_block) {
+		func = is_read ? mmio_block->read : mmio_block->write;
+		if (func)
+			return func(vgpu, offset, pdata, bytes);
+		goto default_rw;
+	}
+
+	/*
+	 * Normal tracked MMIOs.
+	 */
+	mmio_info = find_mmio_info(gvt, offset);
+	if (!mmio_info) {
+		if (!vgpu->mmio.disable_warn_untrack)
+			gvt_vgpu_err("untracked MMIO %08x len %d\n",
+				     offset, bytes);
+		goto default_rw;
+	}
+
+	if (is_read)
+		return mmio_info->read(vgpu, offset, pdata, bytes);
+	else {
+		u64 ro_mask = mmio_info->ro_mask;
+		u32 old_vreg = 0, old_sreg = 0;
+		u64 data = 0;
+
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			old_vreg = vgpu_vreg(vgpu, offset);
+			old_sreg = vgpu_sreg(vgpu, offset);
+		}
+
+		if (likely(!ro_mask))
+			ret = mmio_info->write(vgpu, offset, pdata, bytes);
+		else if (!~ro_mask) {
+			gvt_vgpu_err("try to write RO reg %x\n", offset);
+			return 0;
+		} else {
+			/* keep the RO bits in the virtual register */
+			memcpy(&data, pdata, bytes);
+			data &= ~ro_mask;
+			data |= vgpu_vreg(vgpu, offset) & ro_mask;
+			ret = mmio_info->write(vgpu, offset, &data, bytes);
+		}
+
+		/* higher 16bits of mode ctl regs are mask bits for change */
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+					| (vgpu_vreg(vgpu, offset) & mask);
+			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+					| (vgpu_sreg(vgpu, offset) & mask);
+		}
+	}
+
+	return ret;
+
+default_rw:
+	return is_read ?
+		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
+		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
+}
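For reference, the dispatch order the last hunk introduces: intel_vgpu_mmio_reg_rw first checks the small gvt_mmio_blocks range table, and only then the per-register hash table. A standalone sketch of that range check follows; the block names and the MCHBAR base address are assumptions, while the sizes come from the diff above:

/*
 * Standalone sketch (not the kernel code): match an MMIO offset
 * against a small table of address ranges before falling back to
 * the per-register lookup.
 */
#include <stdio.h>
#include <stddef.h>

struct mmio_block {
	unsigned int offset;	/* start of the range */
	unsigned int size;	/* length in bytes */
	const char *name;
};

static const struct mmio_block blocks[] = {
	{ 0x80000, 0x3000, "CSR_MMIO_START_RANGE" },	/* size from the diff */
	{ 0x140000, 0x40000, "MCHBAR_MIRROR_BASE_SNB" },	/* base is an assumption */
};

static const struct mmio_block *find_block(unsigned int offset)
{
	size_t i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		if (offset >= blocks[i].offset &&
		    offset < blocks[i].offset + blocks[i].size)
			return &blocks[i];
	}
	return NULL;	/* caller falls back to the tracked-register table */
}

int main(void)
{
	const struct mmio_block *b = find_block(0x81004);

	printf("0x81004 -> %s\n", b ? b->name : "tracked-register lookup");
	return 0;
}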