BackMerge v4.18-rc7 into drm-next

rmk requested this for armada and I think we've had a few
conflicts build up.

Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit belongs to:
Dave Airlie
2018-07-30 10:39:22 +10:00
commit 3fce461827
865 changed files with 8464 additions and 4522 deletions

@@ -863,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -895,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
/* TODO
 * Right now we only scan LRI commands on KBL, and only in an inhibit
 * context. That is good enough to support initializing MMIOs via LRI
 * commands in a vgpu inhibit context on KBL.
 */
if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
if (ctx_sr_ctl & 1) {
u32 data = cmd_val(s, index + 1);
if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
intel_vgpu_mask_mmio_write(vgpu,
offset, &data, 4);
else
vgpu_vreg(vgpu, offset) = data;
}
}
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
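For context, two details the scan above relies on (reference notes, not part of the patch): the read at ring_context_gpa + 12 lands on the CTX_CONTEXT_CONTROL value dword of the saved ring context, whose bit 0 is the engine-context-restore-inhibit control, so (ctx_sr_ctl & 1) identifies an inhibit context; and MI_LOAD_REGISTER_IMM (LRI) carries (offset, value) dword pairs after its header, which is why the value for the register at cmd_val(s, index) is fetched from index + 1:

/*
 * MI_LOAD_REGISTER_IMM (LRI), illustrative dword layout:
 *   dword 0: header (opcode, dword count)
 *   dword 1: register offset   <- cmd_val(s, index)
 *   dword 2: register value    <- cmd_val(s, index + 1)
 *   ...      further offset/value pairs
 */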

@@ -219,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -239,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -259,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {

@@ -1901,6 +1901,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1925,6 +1926,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -2177,6 +2179,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
/* If the ggtt entry size is 8 bytes and the write is split into two
 * 4-byte writes, we assume those two writes are consecutive.
 * Otherwise, we abort and report an error.
 */
if (bytes < info->gtt_entry_size) {
if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
/* the first partial write */
ggtt_mm->ggtt_mm.last_partial_off = off;
ggtt_mm->ggtt_mm.last_partial_data = e.val64;
return 0;
} else if ((g_gtt_index ==
(ggtt_mm->ggtt_mm.last_partial_off >>
info->gtt_entry_size_shift)) &&
(off != ggtt_mm->ggtt_mm.last_partial_off)) {
/* the second partial write */
int last_off = ggtt_mm->ggtt_mm.last_partial_off &
(info->gtt_entry_size - 1);
memcpy((void *)&e.val64 + last_off,
(void *)&ggtt_mm->ggtt_mm.last_partial_data +
last_off, bytes);
ggtt_mm->ggtt_mm.last_partial_off = -1UL;
} else {
int last_offset;
gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
ggtt_mm->ggtt_mm.last_partial_off, off,
bytes, info->gtt_entry_size);
/* Set the host ggtt entry to the scratch page and clear the
 * virtual ggtt entry as not present for the last partially
 * written offset.
 */
last_offset = ggtt_mm->ggtt_mm.last_partial_off &
(~(info->gtt_entry_size - 1));
ggtt_get_host_entry(ggtt_mm, &m, last_offset);
ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
ggtt_set_host_entry(ggtt_mm, &m, last_offset);
ggtt_invalidate(gvt->dev_priv);
ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
ops->clear_present(&e);
ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
ggtt_mm->ggtt_mm.last_partial_off = off;
ggtt_mm->ggtt_mm.last_partial_data = e.val64;
return 0;
}
}
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
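A minimal stand-alone model of the partial-write tracking added above, under the same assumptions (entry size fixed at 8 bytes, each guest write covering 4 bytes); the names are hypothetical, and -1UL doubles as the "no partial write pending" sentinel, which is why last_partial_off is initialized to -1UL both in intel_vgpu_create_ggtt_mm and on release:

#include <stdint.h>
#include <string.h>

struct partial_tracker {
	unsigned long last_off;		/* -1UL: no partial write pending */
	uint64_t last_data;
};

/* Returns 1 once both 4-byte halves of one 8-byte entry have arrived. */
static int track_partial(struct partial_tracker *t, unsigned long off,
			 uint32_t data, uint64_t *entry)
{
	if (t->last_off != -1UL &&
	    (off >> 3) == (t->last_off >> 3) && off != t->last_off) {
		/* Second half of the same entry: merge and complete. */
		memcpy((char *)&t->last_data + (off & 7), &data, 4);
		*entry = t->last_data;
		t->last_off = -1UL;
		return 1;
	}
	/* First half, or an abnormal sequence: (re)start tracking. */
	t->last_off = off;
	t->last_data = 0;
	memcpy((char *)&t->last_data + (off & 7), &data, 4);
	return 0;
}

The real code does more in the abnormal case: it points the host entry at the scratch page and clears the present bit for the stale offset, so an interleaved, never-completed partial write cannot leave a half-updated GGTT mapping live.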

@@ -157,6 +157,8 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
unsigned long last_partial_off;
u64 last_partial_data;
} ggtt_mm;
};
};

@@ -274,6 +274,8 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is saved/restored in context */
#define F_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -655,6 +657,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
/**
* intel_gvt_mmio_is_in_ctx - check if a MMIO is flagged as in-context
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if the MMIO has the in-context flag set, false otherwise.
*
*/
static inline bool intel_gvt_mmio_is_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
}
/**
* intel_gvt_mmio_set_in_ctx - mark a MMIO as saved/restored in the logical context
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline void intel_gvt_mmio_set_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
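The attribute helpers above pack one flag byte per 32-bit register, indexed by offset >> 2, so the new F_IN_CTX (1 << 7) takes the last free bit of that byte. A hypothetical usage sketch (the offset is chosen only for illustration):

/* At init time: flag a register as saved/restored in the context image. */
intel_gvt_mmio_set_in_ctx(gvt, 0x229c);

/* In the command parser: LRI writes to such a register are handled
 * specially only when the workload runs in an inhibit context. */
if (intel_gvt_mmio_is_in_ctx(gvt, 0x229c))
	/* ... see cmd_reg_handler() above ... */;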

@@ -3387,6 +3387,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
/**
* intel_vgpu_mask_mmio_write - write a masked register
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
* @bytes: access data length
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 mask, old_vreg;
old_vreg = vgpu_vreg(vgpu, offset);
write_vreg(vgpu, offset, p_data, bytes);
mask = vgpu_vreg(vgpu, offset) >> 16;
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
(vgpu_vreg(vgpu, offset) & mask);
return 0;
}
/**
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
* force-nopriv register
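A worked example of the masked-write semantics above (values invented for illustration): the upper 16 bits of the incoming value select which of the lower 16 bits take effect, and unselected bits keep their previous contents.

/* old_vreg             = 0x0000ffff
 * guest write (p_data)  = 0x00010000  -> mask = 0x0001, new bit 0 = 0
 * result                = (0x0000ffff & ~0x0001) | (0x00010000 & 0x0001)
 *                       = 0x0000fffe  -> bit 0 cleared, bits 1..15 kept
 */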

@@ -99,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
#endif

@@ -587,7 +587,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
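For reference, the list being walked here is the per-engine save/restore table from mmio_context.c; a representative entry is sketched below (shape as in struct engine_mmio: ring id, register, write mask, in-context flag — the exact rows live in the kernel source, this one is illustrative):

/* In-context entries now also get F_IN_CTX, which is exactly what
 * cmd_reg_handler() tests via intel_gvt_mmio_is_in_ctx(). */
{RCS, GFX_MODE_GEN7, 0xffff, true},	/* 0x229c */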