Merge tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.14 merge window.

  I'm sending this early, as my continuing journey into fatherhood is
  occurring really soon now. I'm going to be mostly useless for the next
  couple of weeks; though I may be able to read email, I doubt I'll be
  doing much patch applications or git sending. If anything urgent pops
  up I've asked Daniel/Jani/Alex/Sean to try and direct stuff towards you.

  Outside drm changes:

   - Some rcar-du updates that touch the V4L tree, all acks should be in
     place. It adds one export to the radix tree code for new i915 use case.

   - There are some minor AGP cleanups (don't see that too often).

   - Changes to the vbox driver in staging to avoid breaking compilation.

  Summary:

  core:
   - Atomic helper fixes
   - Atomic UAPI fixes
   - Add YCBCR 4:2:0 support
   - Drop set_busid hook
   - Refactor fb_helper locking
   - Remove a bunch of internal APIs
   - Add a bunch of better default handlers
   - Format modifier/blob plane property added
   - More internal header refactoring
   - Make more internal API names consistent
   - Enhanced syncobj APIs (wait/signal/reset/create signalled)

  bridge:
   - Add Synopsys Designware MIPI DSI host bridge driver

  tiny:
   - Add Pervasive Displays RePaper displays
   - Add support for LEGO MINDSTORMS EV3 LCD

  i915:
   - Lots of GEN10/CNL support patches
   - drm syncobj support
   - Skylake+ watermark refactoring
   - GVT vGPU 48-bit ppgtt support
   - GVT performance improvements
   - NOA change ioctl
   - CCS (color compression) scanout support
   - GPU reset improvements

  amdgpu:
   - Initial hugepage support
   - BO migration logic rework
   - Vega10 improvements
   - Powerplay fixes
   - Stop reprogramming the MC
   - Fixes for ACP audio on stoney
   - SR-IOV fixes/improvements
   - Command submission overhead improvements

  amdkfd:
   - Non-dGPU upstreaming patches
   - Scratch VA ioctl
   - Image tiling modes
   - Update PM4 headers for new firmware
   - Drop all BUG_ONs

  nouveau:
   - GP108 modesetting support
   - Disable MSI on big endian

  vmwgfx:
   - Add fence fd support

  msm:
   - Runtime PM improvements

  exynos:
   - NV12MT support
   - Refactor KMS drivers

  imx-drm:
   - Lock scanout channel to improve memory bw
   - Cleanups

  etnaviv:
   - GEM object population fixes

  tegra:
   - Prep work for Tegra186 support
   - PRIME mmap support

  sunxi:
   - HDMI support improvements
   - HDMI CEC support

  omapdrm:
   - HDMI hotplug IRQ support
   - Big driver cleanup
   - OMAP5 DSI support

  rcar-du:
   - vblank fixes
   - VSP1 updates

  arcgpu:
   - Minor fixes

  stm:
   - Add STM32 DSI controller driver

  dw_hdmi:
   - Add support for Rockchip RK3399
   - HDMI CEC support

  atmel-hlcdc:
   - Add 8-bit color support

  vc4:
   - Atomic fixes
   - New ioctl to attach a label to a buffer object
   - HDMI CEC support
   - Allow userspace to dictate rendering order on submit ioctl"

* tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux: (1074 commits)
  drm/syncobj: Add a signal ioctl (v3)
  drm/syncobj: Add a reset ioctl (v3)
  drm/syncobj: Add a syncobj_array_find helper
  drm/syncobj: Allow wait for submit and signal behavior (v5)
  drm/syncobj: Add a CREATE_SIGNALED flag
  drm/syncobj: Add a callback mechanism for replace_fence (v3)
  drm/syncobj: add sync obj wait interface. (v8)
  i915: Use drm_syncobj_fence_get
  drm/syncobj: Add a race-free drm_syncobj_fence_get helper (v2)
  drm/syncobj: Rename fence_get to find_fence
  drm: kirin: Add mode_valid logic to avoid mode clocks we can't generate
  drm/vmwgfx: Bump the version for fence FD support
  drm/vmwgfx: Add export fence to file descriptor support
  drm/vmwgfx: Add support for imported Fence File Descriptor
  drm/vmwgfx: Prepare to support fence fd
  drm/vmwgfx: Fix incorrect command header offset at restart
  drm/vmwgfx: Support the NOP_ERROR command
  drm/vmwgfx: Restart command buffers after errors
  drm/vmwgfx: Move irq bottom half processing to threads
  drm/vmwgfx: Don't use drm_irq_[un]install
  ...
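The headline UAPI addition in this pull is the enhanced syncobj interface (create-signalled, reset, signal, and wait-for-submit ioctls from the shortlog above). As a rough illustration only — not part of this commit, assuming a render node at /dev/dri/renderD128 and the uapi drm headers from a 4.14 or newer kernel on the include path — a minimal userspace exercise of those ioctls might look like:

/* Hedged sketch of the new drm syncobj UAPI; names match include/uapi/drm/drm.h. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	if (fd < 0)
		return 1;

	/* Create a syncobj that starts out already signalled (new CREATE_SIGNALED flag). */
	struct drm_syncobj_create create = { .flags = DRM_SYNCOBJ_CREATE_SIGNALED };
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
		perror("SYNCOBJ_CREATE");

	/* A zero (already expired) timeout just polls; this succeeds because the
	 * syncobj was created signalled. */
	uint32_t handles[1] = { create.handle };
	struct drm_syncobj_wait wait = {
		.handles = (uintptr_t)handles,
		.count_handles = 1,
		.timeout_nsec = 0,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
	};
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
		perror("SYNCOBJ_WAIT");

	/* Reset drops the attached fence; a subsequent wait would need a new fence
	 * (or DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT to wait for one to appear).
	 * Signal attaches an already-signalled stub fence again. */
	struct drm_syncobj_array array = {
		.handles = (uintptr_t)handles,
		.count_handles = 1,
	};
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array))
		perror("SYNCOBJ_RESET");
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array))
		perror("SYNCOBJ_SIGNAL");

	struct drm_syncobj_destroy destroy = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
	close(fd);
	return 0;
}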
@@ -21,6 +21,7 @@ config DRM_I915
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
select CRC32
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
@@ -25,6 +25,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_MM_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@@ -39,6 +39,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
i915_gem_object.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
@@ -285,8 +285,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;

no_enough_resource:
gvt_vgpu_err("fail to allocate resource %s\n", item);
gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
gvt_err("fail to allocate resource %s\n", item);
gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
@@ -1382,13 +1382,13 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
ret = -EINVAL;
goto err;
}
} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
(!vgpu_gmadr_is_valid(s->vgpu,
guest_gma + op_size - 1))) {
} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
ret = -EINVAL;
goto err;
}

return 0;

err:
gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
int ret;
struct intel_vgpu *vgpu = workload->vgpu;
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);

int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);

int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
@@ -178,9 +178,9 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
SKL_FUSE_PG0_DIST_STATUS |
SKL_FUSE_PG1_DIST_STATUS |
SKL_FUSE_PG2_DIST_STATUS;
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
vgpu_vreg(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
@@ -622,6 +622,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
@@ -685,6 +686,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
workload->shadowed = false;

if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -718,6 +720,17 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
return ret;
}

/* Only scan and shadow the first workload in the queue
 * as there is only one pre-allocated buf-obj for shadow.
 */
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}

queue_workload(workload);
return 0;
}
@@ -800,6 +813,8 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
list_del_init(&pos->list);
free_workload(pos);
}

clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
}
}
@@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
writeq(pte, addr);
}

static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
static inline int gtt_get_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
int ret;

if (WARN_ON(info->gtt_entry_size != 8))
return e;
return -EINVAL;

if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
WARN_ON(ret);
if (WARN_ON(ret))
return ret;
} else if (!pt) {
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
} else {
e->val64 = *((u64 *)pt + index);
}
return e;
return 0;
}

static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
static inline int gtt_set_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
int ret;

if (WARN_ON(info->gtt_entry_size != 8))
return e;
return -EINVAL;

if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
WARN_ON(ret);
if (WARN_ON(ret))
return ret;
} else if (!pt) {
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
return e;
return 0;
}

#define GTT_HAW 46
@@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
|
||||
/*
|
||||
* MM helpers.
|
||||
*/
|
||||
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
|
||||
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index)
|
||||
{
|
||||
struct intel_gvt *gvt = mm->vgpu->gvt;
|
||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||
int ret;
|
||||
|
||||
e->type = mm->page_table_entry_type;
|
||||
|
||||
ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
|
||||
ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ops->test_pse(e);
|
||||
return e;
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
||||
int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index)
|
||||
{
|
||||
@@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
||||
/*
|
||||
* PPGTT shadow page table helpers.
|
||||
*/
|
||||
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
|
||||
static inline int ppgtt_spt_get_entry(
|
||||
struct intel_vgpu_ppgtt_spt *spt,
|
||||
void *page_table, int type,
|
||||
struct intel_gvt_gtt_entry *e, unsigned long index,
|
||||
@@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
|
||||
{
|
||||
struct intel_gvt *gvt = spt->vgpu->gvt;
|
||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||
int ret;
|
||||
|
||||
e->type = get_entry_type(type);
|
||||
|
||||
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
||||
return e;
|
||||
return -EINVAL;
|
||||
|
||||
ops->get_entry(page_table, e, index, guest,
|
||||
ret = ops->get_entry(page_table, e, index, guest,
|
||||
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
||||
spt->vgpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ops->test_pse(e);
|
||||
return e;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
|
||||
static inline int ppgtt_spt_set_entry(
|
||||
struct intel_vgpu_ppgtt_spt *spt,
|
||||
void *page_table, int type,
|
||||
struct intel_gvt_gtt_entry *e, unsigned long index,
|
||||
@@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
|
||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||
|
||||
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
||||
return e;
|
||||
return -EINVAL;
|
||||
|
||||
return ops->set_entry(page_table, e, index, guest,
|
||||
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
||||
@@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
|
||||
|
||||
#define for_each_present_guest_entry(spt, e, i) \
|
||||
for (i = 0; i < pt_entries(spt); i++) \
|
||||
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
|
||||
ppgtt_get_guest_entry(spt, e, i)))
|
||||
if (!ppgtt_get_guest_entry(spt, e, i) && \
|
||||
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
|
||||
|
||||
#define for_each_present_shadow_entry(spt, e, i) \
|
||||
for (i = 0; i < pt_entries(spt); i++) \
|
||||
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
|
||||
ppgtt_get_shadow_entry(spt, e, i)))
|
||||
if (!ppgtt_get_shadow_entry(spt, e, i) && \
|
||||
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
|
||||
|
||||
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||
{
|
||||
@@ -979,29 +989,26 @@ fail:
|
||||
}
|
||||
|
||||
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||
unsigned long index)
|
||||
struct intel_gvt_gtt_entry *se, unsigned long index)
|
||||
{
|
||||
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
||||
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
|
||||
struct intel_vgpu *vgpu = spt->vgpu;
|
||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||
struct intel_gvt_gtt_entry e;
|
||||
int ret;
|
||||
|
||||
ppgtt_get_shadow_entry(spt, &e, index);
|
||||
|
||||
trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
|
||||
trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
|
||||
index);
|
||||
|
||||
if (!ops->test_present(&e))
|
||||
if (!ops->test_present(se))
|
||||
return 0;
|
||||
|
||||
if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
|
||||
if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
|
||||
return 0;
|
||||
|
||||
if (gtt_type_is_pt(get_next_pt_type(e.type))) {
|
||||
if (gtt_type_is_pt(get_next_pt_type(se->type))) {
|
||||
struct intel_vgpu_ppgtt_spt *s =
|
||||
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
|
||||
ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
|
||||
if (!s) {
|
||||
gvt_vgpu_err("fail to find guest page\n");
|
||||
ret = -ENXIO;
|
||||
@@ -1011,12 +1018,10 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
|
||||
ppgtt_set_shadow_entry(spt, &e, index);
|
||||
return 0;
|
||||
fail:
|
||||
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
||||
spt, e.val64, e.type);
|
||||
spt, se->val64, se->type);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1236,22 +1241,37 @@ static int ppgtt_handle_guest_write_page_table(
|
||||
{
|
||||
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
||||
struct intel_vgpu *vgpu = spt->vgpu;
|
||||
int type = spt->shadow_page.type;
|
||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||
struct intel_gvt_gtt_entry se;
|
||||
|
||||
int ret;
|
||||
int new_present;
|
||||
|
||||
new_present = ops->test_present(we);
|
||||
|
||||
ret = ppgtt_handle_guest_entry_removal(gpt, index);
|
||||
if (ret)
|
||||
goto fail;
|
||||
/*
|
||||
* Adding the new entry first and then removing the old one, that can
|
||||
* guarantee the ppgtt table is validated during the window between
|
||||
* adding and removal.
|
||||
*/
|
||||
ppgtt_get_shadow_entry(spt, &se, index);
|
||||
|
||||
if (new_present) {
|
||||
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
|
||||
if (ret)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
if (!new_present) {
|
||||
ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
|
||||
ppgtt_set_shadow_entry(spt, &se, index);
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
|
||||
@@ -1323,7 +1343,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
|
||||
struct intel_vgpu *vgpu = spt->vgpu;
|
||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
||||
struct intel_gvt_gtt_entry we;
|
||||
struct intel_gvt_gtt_entry we, se;
|
||||
unsigned long index;
|
||||
int ret;
|
||||
|
||||
@@ -1339,7 +1359,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
|
||||
return ret;
|
||||
} else {
|
||||
if (!test_bit(index, spt->post_shadow_bitmap)) {
|
||||
ret = ppgtt_handle_guest_entry_removal(gpt, index);
|
||||
ppgtt_get_shadow_entry(spt, &se, index);
|
||||
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -1713,8 +1734,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||
if (!vgpu_gmadr_is_valid(vgpu, gma))
|
||||
goto err;
|
||||
|
||||
ggtt_get_guest_entry(mm, &e,
|
||||
gma_ops->gma_to_ggtt_pte_index(gma));
|
||||
ret = ggtt_get_guest_entry(mm, &e,
|
||||
gma_ops->gma_to_ggtt_pte_index(gma));
|
||||
if (ret)
|
||||
goto err;
|
||||
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
||||
+ (gma & ~GTT_PAGE_MASK);
|
||||
|
||||
@@ -1724,7 +1747,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||
|
||||
switch (mm->page_table_level) {
|
||||
case 4:
|
||||
ppgtt_get_shadow_root_entry(mm, &e, 0);
|
||||
ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
|
||||
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
|
||||
gma_index[2] = gma_ops->gma_to_pde_index(gma);
|
||||
@@ -1732,15 +1757,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||
index = 4;
|
||||
break;
|
||||
case 3:
|
||||
ppgtt_get_shadow_root_entry(mm, &e,
|
||||
ret = ppgtt_get_shadow_root_entry(mm, &e,
|
||||
gma_ops->gma_to_l3_pdp_index(gma));
|
||||
if (ret)
|
||||
goto err;
|
||||
gma_index[0] = gma_ops->gma_to_pde_index(gma);
|
||||
gma_index[1] = gma_ops->gma_to_pte_index(gma);
|
||||
index = 2;
|
||||
break;
|
||||
case 2:
|
||||
ppgtt_get_shadow_root_entry(mm, &e,
|
||||
ret = ppgtt_get_shadow_root_entry(mm, &e,
|
||||
gma_ops->gma_to_pde_index(gma));
|
||||
if (ret)
|
||||
goto err;
|
||||
gma_index[0] = gma_ops->gma_to_pte_index(gma);
|
||||
index = 1;
|
||||
break;
|
||||
@@ -1755,6 +1784,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||
(i == index - 1));
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (!pte_ops->test_present(&e)) {
|
||||
gvt_dbg_core("GMA 0x%lx is not present\n", gma);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
||||
@@ -2329,13 +2363,12 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
|
||||
/**
|
||||
* intel_vgpu_reset_gtt - reset the all GTT related status
|
||||
* @vgpu: a vGPU
|
||||
* @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
|
||||
*
|
||||
* This function is called from vfio core to reset reset all
|
||||
* GTT related status, including GGTT, PPGTT, scratch page.
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
|
||||
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -2347,9 +2380,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
|
||||
*/
|
||||
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
|
||||
|
||||
if (!dmlr)
|
||||
return;
|
||||
|
||||
intel_vgpu_reset_ggtt(vgpu);
|
||||
|
||||
/* clear scratch page for security */
|
||||
|
@@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
|
||||
};
|
||||
|
||||
struct intel_gvt_gtt_pte_ops {
|
||||
struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
int (*get_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index,
|
||||
bool hypervisor_access,
|
||||
unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
int (*set_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index,
|
||||
bool hypervisor_access,
|
||||
unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
bool (*test_present)(struct intel_gvt_gtt_entry *e);
|
||||
void (*clear_present)(struct intel_gvt_gtt_entry *e);
|
||||
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
|
||||
@@ -143,12 +147,12 @@ struct intel_vgpu_mm {
|
||||
struct intel_vgpu *vgpu;
|
||||
};
|
||||
|
||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
|
||||
extern int intel_vgpu_mm_get_entry(
|
||||
struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index);
|
||||
|
||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
|
||||
extern int intel_vgpu_mm_set_entry(
|
||||
struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index);
|
||||
@@ -208,7 +212,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
|
||||
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
|
||||
|
||||
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
|
||||
extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
|
||||
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
|
||||
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
|
||||
|
||||
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
|
@@ -167,6 +167,7 @@ struct intel_vgpu {
|
||||
atomic_t running_workload_num;
|
||||
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
|
||||
struct i915_gem_context *shadow_ctx;
|
||||
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
|
||||
struct {
|
||||
@@ -482,6 +483,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
|
||||
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
|
||||
void populate_pvinfo_page(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
|
||||
|
||||
struct intel_gvt_ops {
|
||||
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
|
||||
unsigned int);
|
||||
|
@@ -113,9 +113,17 @@ static int new_mmio_info(struct intel_gvt *gvt,
|
||||
|
||||
info->offset = i;
|
||||
p = find_mmio_info(gvt, info->offset);
|
||||
if (p)
|
||||
gvt_err("dup mmio definition offset %x\n",
|
||||
if (p) {
|
||||
WARN(1, "dup mmio definition offset %x\n",
|
||||
info->offset);
|
||||
kfree(info);
|
||||
|
||||
/* We return -EEXIST here to make GVT-g load fail.
|
||||
* So duplicated MMIO can be found as soon as
|
||||
* possible.
|
||||
*/
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
info->ro_mask = ro_mask;
|
||||
info->device = device;
|
||||
@@ -1222,10 +1230,12 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
|
||||
{
|
||||
write_vreg(vgpu, offset, p_data, bytes);
|
||||
|
||||
if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
|
||||
vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
|
||||
if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
|
||||
vgpu_vreg(vgpu, offset) |=
|
||||
HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
|
||||
else
|
||||
vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
|
||||
vgpu_vreg(vgpu, offset) &=
|
||||
~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2242,10 +2252,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
|
||||
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
|
||||
MMIO_D(GEN6_PMINTRMSK, D_ALL);
|
||||
MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
/*
|
||||
* Use an arbitrary power well controlled by the PWR_WELL_CTL
|
||||
* register.
|
||||
*/
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
|
||||
power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
|
||||
power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
|
||||
power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
|
||||
|
||||
@@ -2581,7 +2598,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
|
||||
NULL, force_nonpriv_write);
|
||||
|
||||
MMIO_D(0x22040, D_BDW_PLUS);
|
||||
MMIO_D(0x44484, D_BDW_PLUS);
|
||||
MMIO_D(0x4448c, D_BDW_PLUS);
|
||||
|
||||
@@ -2636,9 +2652,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
|
||||
dp_aux_ch_ctl_mmio_write);
|
||||
|
||||
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
|
||||
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
|
||||
skl_power_well_ctl_write);
|
||||
/*
|
||||
* Use an arbitrary power well controlled by the PWR_WELL_CTL
|
||||
* register.
|
||||
*/
|
||||
MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
|
||||
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
|
||||
skl_power_well_ctl_write);
|
||||
MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
|
||||
|
||||
MMIO_D(0xa210, D_SKL_PLUS);
|
||||
@@ -2831,7 +2851,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
MMIO_D(0x320f0, D_SKL | D_KBL);
|
||||
|
||||
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||
MMIO_D(0x70034, D_SKL_PLUS);
|
||||
MMIO_D(0x71034, D_SKL_PLUS);
|
||||
MMIO_D(0x72034, D_SKL_PLUS);
|
||||
@@ -2849,10 +2868,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||
NULL, NULL);
|
||||
|
||||
MMIO_D(0x4ab8, D_KBL);
|
||||
MMIO_D(0x940c, D_SKL_PLUS);
|
||||
MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
|
||||
MMIO_D(0x4ab0, D_SKL | D_KBL);
|
||||
MMIO_D(0x20d4, D_SKL | D_KBL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -1170,10 +1170,27 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
|
||||
return sprintf(buf, "\n");
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
hw_id_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct mdev_device *mdev = mdev_from_dev(dev);
|
||||
|
||||
if (mdev) {
|
||||
struct intel_vgpu *vgpu = (struct intel_vgpu *)
|
||||
mdev_get_drvdata(mdev);
|
||||
return sprintf(buf, "%u\n",
|
||||
vgpu->shadow_ctx->hw_id);
|
||||
}
|
||||
return sprintf(buf, "\n");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR_RO(vgpu_id);
|
||||
static DEVICE_ATTR_RO(hw_id);
|
||||
|
||||
static struct attribute *intel_vgpu_attrs[] = {
|
||||
&dev_attr_vgpu_id.attr,
|
||||
&dev_attr_hw_id.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@@ -207,18 +207,16 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
|
||||
|
||||
offset.reg = regs[ring_id];
|
||||
for (i = 0; i < 64; i++) {
|
||||
gen9_render_mocs[ring_id][i] = I915_READ(offset);
|
||||
gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
|
||||
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
|
||||
POSTING_READ(offset);
|
||||
offset.reg += 4;
|
||||
}
|
||||
|
||||
if (ring_id == RCS) {
|
||||
l3_offset.reg = 0xb020;
|
||||
for (i = 0; i < 32; i++) {
|
||||
gen9_render_mocs_L3[i] = I915_READ(l3_offset);
|
||||
I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
|
||||
POSTING_READ(l3_offset);
|
||||
gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
|
||||
I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
|
||||
l3_offset.reg += 4;
|
||||
}
|
||||
}
|
||||
@@ -242,18 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
|
||||
|
||||
offset.reg = regs[ring_id];
|
||||
for (i = 0; i < 64; i++) {
|
||||
vgpu_vreg(vgpu, offset) = I915_READ(offset);
|
||||
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
|
||||
POSTING_READ(offset);
|
||||
vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
|
||||
I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
|
||||
offset.reg += 4;
|
||||
}
|
||||
|
||||
if (ring_id == RCS) {
|
||||
l3_offset.reg = 0xb020;
|
||||
for (i = 0; i < 32; i++) {
|
||||
vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
|
||||
I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
|
||||
POSTING_READ(l3_offset);
|
||||
vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
|
||||
I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
|
||||
l3_offset.reg += 4;
|
||||
}
|
||||
}
|
||||
@@ -272,6 +268,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
|
||||
u32 inhibit_mask =
|
||||
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
|
||||
i915_reg_t last_reg = _MMIO(0);
|
||||
|
||||
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|
||||
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
|
||||
@@ -287,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||
if (mmio->ring_id != ring_id)
|
||||
continue;
|
||||
|
||||
mmio->value = I915_READ(mmio->reg);
|
||||
mmio->value = I915_READ_FW(mmio->reg);
|
||||
|
||||
/*
|
||||
* if it is an inhibit context, load in_context mmio
|
||||
@@ -304,13 +301,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||
else
|
||||
v = vgpu_vreg(vgpu, mmio->reg);
|
||||
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
I915_WRITE_FW(mmio->reg, v);
|
||||
last_reg = mmio->reg;
|
||||
|
||||
trace_render_mmio(vgpu->id, "load",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
|
||||
/* Make sure the swiched MMIOs has taken effect. */
|
||||
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
|
||||
I915_READ_FW(last_reg);
|
||||
|
||||
handle_tlb_pending_event(vgpu, ring_id);
|
||||
}
|
||||
|
||||
@@ -319,6 +321,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct render_mmio *mmio;
|
||||
i915_reg_t last_reg = _MMIO(0);
|
||||
u32 v;
|
||||
int i, array_size;
|
||||
|
||||
@@ -335,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||
if (mmio->ring_id != ring_id)
|
||||
continue;
|
||||
|
||||
vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
|
||||
vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
|
||||
|
||||
if (mmio->mask) {
|
||||
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
|
||||
@@ -346,13 +349,17 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||
if (mmio->in_context)
|
||||
continue;
|
||||
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
I915_WRITE_FW(mmio->reg, v);
|
||||
last_reg = mmio->reg;
|
||||
|
||||
trace_render_mmio(vgpu->id, "restore",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
|
||||
/* Make sure the swiched MMIOs has taken effect. */
|
||||
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
|
||||
I915_READ_FW(last_reg);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -367,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||
struct intel_vgpu *next, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
|
||||
if (WARN_ON(!pre && !next))
|
||||
return;
|
||||
|
||||
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
|
||||
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
|
||||
|
||||
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
|
||||
|
||||
/**
|
||||
* We are using raw mmio access wrapper to improve the
|
||||
* performace for batch mmio read/write, so we need
|
||||
* handle forcewake mannually.
|
||||
*/
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
/**
|
||||
* TODO: Optimize for vGPU to vGPU switch by merging
|
||||
* switch_mmio_to_host() and switch_mmio_to_vgpu().
|
||||
@@ -382,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||
|
||||
if (next)
|
||||
switch_mmio_to_vgpu(next, ring_id);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
@@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine)
|
||||
{
|
||||
struct intel_context *ce = &ctx->engine[engine->id];
|
||||
u64 desc = 0;
|
||||
|
||||
desc = ce->lrc_desc;
|
||||
|
||||
/* Update bits 0-11 of the context descriptor which includes flags
|
||||
* like GEN8_CTX_* cached in desc_template
|
||||
*/
|
||||
desc &= U64_MAX << 12;
|
||||
desc |= ctx->desc_template & ((1ULL << 12) - 1);
|
||||
|
||||
ce->lrc_desc = desc;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
|
||||
* shadow it as well, include ringbuffer,wa_ctx and ctx.
|
||||
* @workload: an abstract entity for each execlist submission.
|
||||
*
|
||||
* This function is called before the workload submitting to i915, to make
|
||||
* sure the content of the workload is valid.
|
||||
*/
|
||||
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
int ring_id = workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
||||
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
|
||||
struct drm_i915_gem_request *rq;
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_ring *ring;
|
||||
int ret;
|
||||
|
||||
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
|
||||
ring_id, workload);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
if (workload->shadowed)
|
||||
return 0;
|
||||
|
||||
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
|
||||
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
|
||||
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
/* pin shadow context by gvt even the shadow context will be pinned
|
||||
* when i915 alloc request. That is because gvt will update the guest
|
||||
* context from shadow context when workload is completed, and at that
|
||||
* moment, i915 may already unpined the shadow context to make the
|
||||
* shadow_ctx pages invalid. So gvt need to pin itself. After update
|
||||
* the guest context, gvt can unpin the shadow_ctx safely.
|
||||
*/
|
||||
ring = engine->context_pin(engine, shadow_ctx);
|
||||
if (IS_ERR(ring)) {
|
||||
ret = PTR_ERR(ring);
|
||||
gvt_vgpu_err("fail to pin shadow context\n");
|
||||
workload->status = ret;
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
|
||||
shadow_context_descriptor_update(shadow_ctx,
|
||||
dev_priv->engine[ring_id]);
|
||||
|
||||
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
|
||||
if (IS_ERR(rq)) {
|
||||
@@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||
|
||||
workload->req = i915_gem_request_get(rq);
|
||||
|
||||
ret = intel_gvt_scan_and_shadow_workload(workload);
|
||||
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
workload->shadowed = true;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
int ring_id = workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
||||
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_ring *ring;
|
||||
int ret = 0;
|
||||
|
||||
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
|
||||
ring_id, workload);
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
ret = intel_gvt_scan_and_shadow_workload(workload);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (workload->prepare) {
|
||||
ret = workload->prepare(workload);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
|
||||
ring_id, workload->req);
|
||||
/* pin shadow context by gvt even the shadow context will be pinned
|
||||
* when i915 alloc request. That is because gvt will update the guest
|
||||
* context from shadow context when workload is completed, and at that
|
||||
* moment, i915 may already unpined the shadow context to make the
|
||||
* shadow_ctx pages invalid. So gvt need to pin itself. After update
|
||||
* the guest context, gvt can unpin the shadow_ctx safely.
|
||||
*/
|
||||
ring = engine->context_pin(engine, shadow_ctx);
|
||||
if (IS_ERR(ring)) {
|
||||
ret = PTR_ERR(ring);
|
||||
gvt_vgpu_err("fail to pin shadow context\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
workload->dispatched = true;
|
||||
out:
|
||||
if (ret)
|
||||
workload->status = ret;
|
||||
|
||||
if (!IS_ERR_OR_NULL(rq))
|
||||
i915_add_request(rq);
|
||||
else
|
||||
engine->context_unpin(engine, shadow_ctx);
|
||||
if (!IS_ERR_OR_NULL(workload->req)) {
|
||||
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
|
||||
ring_id, workload->req);
|
||||
i915_add_request(workload->req);
|
||||
workload->dispatched = true;
|
||||
}
|
||||
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
return ret;
|
||||
@@ -617,7 +664,7 @@ err:
|
||||
|
||||
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
|
||||
{
|
||||
i915_gem_context_put_unlocked(vgpu->shadow_ctx);
|
||||
i915_gem_context_put(vgpu->shadow_ctx);
|
||||
}
|
||||
|
||||
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
|
||||
@@ -631,5 +678,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
|
||||
|
||||
vgpu->shadow_ctx->engine[RCS].initialised = true;
|
||||
|
||||
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
|
||||
struct drm_i915_gem_request *req;
|
||||
/* if this workload has been dispatched to i915? */
|
||||
bool dispatched;
|
||||
bool shadowed;
|
||||
int status;
|
||||
|
||||
struct intel_vgpu_mm *shadow_mm;
|
||||
|
@@ -43,6 +43,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
|
||||
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
|
||||
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
|
||||
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
|
||||
vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
|
||||
vgpu_aperture_gmadr_base(vgpu);
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
|
||||
@@ -504,11 +505,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
|
||||
/* full GPU reset or device model level reset */
|
||||
if (engine_mask == ALL_ENGINES || dmlr) {
|
||||
|
||||
intel_vgpu_reset_gtt(vgpu, dmlr);
|
||||
|
||||
/*fence will not be reset during virtual reset */
|
||||
if (dmlr)
|
||||
if (dmlr) {
|
||||
intel_vgpu_reset_gtt(vgpu);
|
||||
intel_vgpu_reset_resource(vgpu);
|
||||
}
|
||||
|
||||
intel_vgpu_reset_mmio(vgpu, dmlr);
|
||||
populate_pvinfo_page(vgpu);
|
||||
|
@@ -543,75 +543,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_crtc *crtc;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
const char pipe = pipe_name(crtc->pipe);
|
||||
const char plane = plane_name(crtc->plane);
|
||||
struct intel_flip_work *work;
|
||||
|
||||
spin_lock_irq(&dev->event_lock);
|
||||
work = crtc->flip_work;
|
||||
if (work == NULL) {
|
||||
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
|
||||
pipe, plane);
|
||||
} else {
|
||||
u32 pending;
|
||||
u32 addr;
|
||||
|
||||
pending = atomic_read(&work->pending);
|
||||
if (pending) {
|
||||
seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
|
||||
pipe, plane);
|
||||
} else {
|
||||
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
|
||||
pipe, plane);
|
||||
}
|
||||
if (work->flip_queued_req) {
|
||||
struct intel_engine_cs *engine = work->flip_queued_req->engine;
|
||||
|
||||
seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
|
||||
engine->name,
|
||||
work->flip_queued_req->global_seqno,
|
||||
intel_engine_last_submit(engine),
|
||||
intel_engine_get_seqno(engine),
|
||||
i915_gem_request_completed(work->flip_queued_req));
|
||||
} else
|
||||
seq_printf(m, "Flip not associated with any ring\n");
|
||||
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
|
||||
work->flip_queued_vblank,
|
||||
work->flip_ready_vblank,
|
||||
intel_crtc_get_vblank_counter(crtc));
|
||||
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 4)
|
||||
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
|
||||
else
|
||||
addr = I915_READ(DSPADDR(crtc->plane));
|
||||
seq_printf(m, "Current scanout address 0x%08x\n", addr);
|
||||
|
||||
if (work->pending_flip_obj) {
|
||||
seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
|
||||
seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&dev->event_lock);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
@@ -1159,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
reqf = I915_READ(GEN6_RPNSWREQ);
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
reqf >>= 23;
|
||||
else {
|
||||
reqf &= ~GEN6_TURBO_DISABLE;
|
||||
@@ -1181,7 +1112,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
|
||||
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
|
||||
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
|
||||
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
|
||||
@@ -1210,7 +1141,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
dev_priv->rps.pm_intrmsk_mbz);
|
||||
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
|
||||
seq_printf(m, "Render p-state ratio: %d\n",
|
||||
(gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
|
||||
(gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
|
||||
seq_printf(m, "Render p-state VID: %d\n",
|
||||
gt_perf_status & 0xff);
|
||||
seq_printf(m, "Render p-state limit: %d\n",
|
||||
@@ -1241,18 +1172,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
|
||||
|
||||
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
|
||||
rp_state_cap >> 16) & 0xff;
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
||||
IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
||||
max_freq = (rp_state_cap & 0xff00) >> 8;
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
||||
IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
|
||||
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
|
||||
rp_state_cap >> 0) & 0xff;
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
max_freq *= (IS_GEN9_BC(dev_priv) ||
|
||||
IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
|
||||
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
|
||||
intel_gpu_freq(dev_priv, max_freq));
|
||||
seq_printf(m, "Max overclocked frequency: %dMHz\n",
|
||||
@@ -1407,6 +1341,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_reset_info(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
struct i915_gpu_error *error = &dev_priv->gpu_error;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
|
||||
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
seq_printf(m, "%s = %u\n", engine->name,
|
||||
i915_reset_engine_count(error, engine));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ironlake_drpc_info(struct seq_file *m)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
@@ -1838,7 +1789,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (IS_GEN9_BC(dev_priv)) {
|
||||
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
min_gpu_freq =
|
||||
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
|
||||
@@ -1858,7 +1809,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
|
||||
&ia_freq);
|
||||
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
|
||||
intel_gpu_freq(dev_priv, (gpu_freq *
|
||||
(IS_GEN9_BC(dev_priv) ?
|
||||
(IS_GEN9_BC(dev_priv) ||
|
||||
IS_CANNONLAKE(dev_priv) ?
|
||||
GEN9_FREQ_SCALER : 1))),
|
||||
((ia_freq >> 0) & 0xff) * 100,
|
||||
((ia_freq >> 8) & 0xff) * 100);
|
||||
@@ -1914,7 +1866,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
|
||||
return ret;
|
||||
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
if (dev_priv->fbdev) {
|
||||
if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
|
||||
fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
|
||||
|
||||
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
|
||||
@@ -1970,7 +1922,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link) {
|
||||
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
|
||||
seq_printf(m, "HW context %u ", ctx->hw_id);
|
||||
if (ctx->pid) {
|
||||
struct task_struct *task;
|
||||
@@ -2002,12 +1954,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
seq_printf(m,
|
||||
"\tvma hashtable size=%u (actual %lu), count=%u\n",
|
||||
ctx->vma_lut.ht_size,
|
||||
BIT(ctx->vma_lut.ht_bits),
|
||||
ctx->vma_lut.ht_count);
|
||||
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
||||
@@ -2076,7 +2022,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link)
|
||||
list_for_each_entry(ctx, &dev_priv->contexts.list, link)
|
||||
for_each_engine(engine, dev_priv, id)
|
||||
i915_dump_lrc_obj(m, ctx, engine);
|
||||
|
||||
@@ -2310,6 +2256,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
|
||||
seq_printf(m, "GPU busy? %s [%d requests]\n",
|
||||
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
|
||||
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
|
||||
seq_printf(m, "Boosts outstanding? %d\n",
|
||||
atomic_read(&dev_priv->rps.num_waiters));
|
||||
seq_printf(m, "Frequency requested %d\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
|
||||
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
|
||||
@@ -2323,22 +2271,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
|
||||
|
||||
mutex_lock(&dev->filelist_mutex);
|
||||
spin_lock(&dev_priv->rps.client_lock);
|
||||
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
struct task_struct *task;
|
||||
|
||||
rcu_read_lock();
|
||||
task = pid_task(file->pid, PIDTYPE_PID);
|
||||
seq_printf(m, "%s [%d]: %d boosts%s\n",
|
||||
seq_printf(m, "%s [%d]: %d boosts\n",
|
||||
task ? task->comm : "<unknown>",
|
||||
task ? task->pid : -1,
|
||||
file_priv->rps.boosts,
|
||||
list_empty(&file_priv->rps.link) ? "" : ", active");
|
||||
atomic_read(&file_priv->rps.boosts));
|
||||
rcu_read_unlock();
|
||||
}
|
||||
seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
|
||||
spin_unlock(&dev_priv->rps.client_lock);
|
||||
seq_printf(m, "Kernel (anonymous) boosts: %d\n",
|
||||
atomic_read(&dev_priv->rps.boosts));
|
||||
mutex_unlock(&dev->filelist_mutex);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 6 &&
|
||||
@@ -2831,7 +2777,7 @@ out:
|
||||
static int i915_energy_uJ(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
u64 power;
|
||||
unsigned long long power;
|
||||
u32 units;
|
||||
|
||||
if (INTEL_GEN(dev_priv) < 6)
|
||||
@@ -2839,15 +2785,18 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
rdmsrl(MSR_RAPL_POWER_UNIT, power);
|
||||
power = (power & 0x1f00) >> 8;
|
||||
units = 1000000 / (1 << power); /* convert to uJ */
|
||||
if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
units = (power & 0x1f00) >> 8;
|
||||
power = I915_READ(MCH_SECP_NRG_STTS);
|
||||
power *= units;
|
||||
power = (1000000 * power) >> units; /* convert to uJ */
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
||||
seq_printf(m, "%llu", (long long unsigned)power);
|
||||
seq_printf(m, "%llu", power);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3289,6 +3238,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
|
||||
static int i915_engine_info(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = node_to_i915(m->private);
|
||||
struct i915_gpu_error *error = &dev_priv->gpu_error;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
@@ -3312,6 +3262,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
|
||||
engine->hangcheck.seqno,
|
||||
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
|
||||
engine->timeline->inflight_seqnos);
|
||||
seq_printf(m, "\tReset count: %d\n",
|
||||
i915_reset_engine_count(error, engine));
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
@@ -3370,8 +3322,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
|
||||
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
|
||||
read = GEN8_CSB_READ_PTR(ptr);
|
||||
write = GEN8_CSB_WRITE_PTR(ptr);
|
||||
seq_printf(m, "\tExeclist CSB read %d, write %d\n",
|
||||
read, write);
|
||||
seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
|
||||
read, write,
|
||||
yesno(test_bit(ENGINE_IRQ_EXECLIST,
|
||||
&engine->irq_posted)));
|
||||
if (read >= GEN8_CSB_ENTRIES)
|
||||
read = 0;
|
||||
if (write >= GEN8_CSB_ENTRIES)
|
||||
@@ -3758,13 +3712,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
|
||||
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
encoder = to_intel_encoder(connector->encoder);
|
||||
if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
continue;
|
||||
|
||||
if (encoder && connector->status == connector_status_connected) {
|
||||
intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
status = kstrtoint(input_buffer, 10, &val);
|
||||
if (status < 0)
|
||||
break;
|
||||
@@ -3796,13 +3755,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
|
||||
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
encoder = to_intel_encoder(connector->encoder);
|
||||
if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
continue;
|
||||
|
||||
if (encoder && connector->status == connector_status_connected) {
|
||||
intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
if (intel_dp->compliance.test_active)
|
||||
seq_puts(m, "1");
|
||||
else
|
||||
@@ -3842,13 +3806,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
|
||||
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
encoder = to_intel_encoder(connector->encoder);
|
||||
if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
continue;
|
||||
|
||||
if (encoder && connector->status == connector_status_connected) {
|
||||
intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
if (intel_dp->compliance.test_type ==
|
||||
DP_TEST_LINK_EDID_READ)
|
||||
seq_printf(m, "%lx",
|
||||
@@ -3895,13 +3864,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
|
||||
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (connector->connector_type !=
|
||||
DRM_MODE_CONNECTOR_DisplayPort)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected &&
|
||||
connector->encoder != NULL) {
|
||||
intel_dp = enc_to_intel_dp(connector->encoder);
|
||||
encoder = to_intel_encoder(connector->encoder);
|
||||
if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
|
||||
continue;
|
||||
|
||||
if (encoder && connector->status == connector_status_connected) {
|
||||
intel_dp = enc_to_intel_dp(&encoder->base);
|
||||
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
|
||||
} else
|
||||
seq_puts(m, "0");
|
||||
@@ -4810,7 +4784,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
|
||||
{"i915_gem_gtt", i915_gem_gtt_info, 0},
|
||||
{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
|
||||
{"i915_gem_stolen", i915_gem_stolen_list_info },
|
||||
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
|
||||
{"i915_gem_request", i915_gem_request_info, 0},
|
||||
{"i915_gem_seqno", i915_gem_seqno_info, 0},
|
||||
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
|
||||
@@ -4824,6 +4797,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
|
||||
{"i915_huc_load_status", i915_huc_load_status_info, 0},
|
||||
{"i915_frequency_info", i915_frequency_info, 0},
|
||||
{"i915_hangcheck_info", i915_hangcheck_info, 0},
|
||||
{"i915_reset_info", i915_reset_info, 0},
|
||||
{"i915_drpc_info", i915_drpc_info, 0},
|
||||
{"i915_emon_status", i915_emon_status, 0},
|
||||
{"i915_ring_freq_table", i915_ring_freq_table, 0},
|
||||
|
@@ -132,9 +132,13 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
|
||||
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
|
||||
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
|
||||
ret = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
|
||||
DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
|
||||
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
|
||||
ret = PCH_LPT;
|
||||
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
|
||||
dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
|
||||
else
|
||||
dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
|
||||
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
|
||||
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
ret = PCH_SPT;
|
||||
@@ -173,29 +177,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
|
||||
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
|
||||
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
|
||||
unsigned short id_ext = pch->device &
|
||||
INTEL_PCH_DEVICE_ID_MASK_EXT;
|
||||
|
||||
dev_priv->pch_id = id;
|
||||
|
||||
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_IBX;
|
||||
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
|
||||
WARN_ON(!IS_GEN5(dev_priv));
|
||||
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
|
||||
WARN_ON(!(IS_GEN6(dev_priv) ||
|
||||
IS_IVYBRIDGE(dev_priv)));
|
||||
WARN_ON(!IS_GEN6(dev_priv) &&
|
||||
!IS_IVYBRIDGE(dev_priv));
|
||||
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
|
||||
/* PantherPoint is CPT compatible */
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
|
||||
WARN_ON(!(IS_GEN6(dev_priv) ||
|
||||
IS_IVYBRIDGE(dev_priv)));
|
||||
WARN_ON(!IS_GEN6(dev_priv) &&
|
||||
!IS_IVYBRIDGE(dev_priv));
|
||||
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
@@ -203,51 +203,60 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
|
||||
WARN_ON(IS_HSW_ULT(dev_priv) ||
|
||||
IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
!IS_BROADWELL(dev_priv));
|
||||
WARN_ON(!IS_HSW_ULT(dev_priv) &&
|
||||
!IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
|
||||
/* WildcatPoint is LPT compatible */
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
!IS_BROADWELL(dev_priv));
|
||||
WARN_ON(IS_HSW_ULT(dev_priv) ||
|
||||
IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
|
||||
/* WildcatPoint is LPT compatible */
|
||||
dev_priv->pch_type = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
|
||||
WARN_ON(!IS_HASWELL(dev_priv) &&
|
||||
!IS_BROADWELL(dev_priv));
|
||||
WARN_ON(!IS_HSW_ULT(dev_priv) &&
|
||||
!IS_BDW_ULT(dev_priv));
|
||||
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id_ext;
|
||||
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_type = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_KBP;
|
||||
DRM_DEBUG_KMS("Found KabyPoint PCH\n");
|
||||
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_KABYLAKE(dev_priv));
|
||||
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type = PCH_CNP;
|
||||
DRM_DEBUG_KMS("Found CannonPoint PCH\n");
|
||||
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
|
||||
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
|
||||
!IS_COFFEELAKE(dev_priv));
|
||||
} else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_id = id_ext;
|
||||
} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_type = PCH_CNP;
|
||||
DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
|
||||
DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
|
||||
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
|
||||
!IS_COFFEELAKE(dev_priv));
|
||||
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
|
||||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
|
||||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
|
||||
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
|
||||
id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
|
||||
(id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
|
||||
pch->subsystem_vendor ==
|
||||
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
|
||||
pch->subsystem_device ==
|
||||
PCI_SUBDEVICE_ID_QEMU)) {
|
||||
dev_priv->pch_id = id;
|
||||
dev_priv->pch_type =
|
||||
intel_virt_detect_pch(dev_priv);
|
||||
} else
|
||||
@@ -331,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
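The two added lines make I915_PARAM_HAS_GPU_RESET report 2 when per-engine reset is supported, instead of just 0/1 for full-GPU reset. A minimal userspace sketch of querying it through the standard getparam ioctl; the device node and header path are assumptions, and the 0/1/2 interpretation is the one introduced by this hunk:

/* Sketch only: query I915_PARAM_HAS_GPU_RESET from userspace.
 * 1 = full-GPU reset, 2 = full + per-engine reset (per the hunk above). */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_GPU_RESET,
		.value = &value,
	};
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 1;

	printf("gpu reset capability: %d\n", value);
	return 0;
}
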
@@ -377,6 +388,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
||||
case I915_PARAM_HAS_EXEC_FENCE:
|
||||
case I915_PARAM_HAS_EXEC_CAPTURE:
|
||||
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
|
||||
case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
|
||||
/* For the time being all of these are always true;
|
||||
* if some supported hardware does not have one of these
|
||||
* features this value needs to be provided from
|
||||
@@ -585,16 +597,19 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
|
||||
|
||||
static void i915_gem_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* Flush any outstanding unpin_work. */
|
||||
i915_gem_drain_workqueue(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
intel_uc_fini_hw(dev_priv);
|
||||
i915_gem_cleanup_engines(dev_priv);
|
||||
i915_gem_context_fini(dev_priv);
|
||||
i915_gem_contexts_fini(dev_priv);
|
||||
i915_gem_cleanup_userptr(dev_priv);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
i915_gem_drain_freed_objects(dev_priv);
|
||||
|
||||
WARN_ON(!list_empty(&dev_priv->context_list));
|
||||
WARN_ON(!list_empty(&dev_priv->contexts.list));
|
||||
}
|
||||
|
||||
static int i915_load_modeset_init(struct drm_device *dev)
|
||||
@@ -862,7 +877,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
|
||||
spin_lock_init(&dev_priv->uncore.lock);
|
||||
|
||||
spin_lock_init(&dev_priv->mm.object_stat_lock);
|
||||
spin_lock_init(&dev_priv->mmio_flip_lock);
|
||||
mutex_init(&dev_priv->sb_lock);
|
||||
mutex_init(&dev_priv->modeset_restore_lock);
|
||||
mutex_init(&dev_priv->av_mutex);
|
||||
@@ -1227,6 +1241,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
intel_fbdev_unregister(dev_priv);
|
||||
intel_audio_deinit(dev_priv);
|
||||
|
||||
intel_gpu_ips_teardown();
|
||||
@@ -1319,7 +1334,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
ret = i915_load_modeset_init(&dev_priv->drm);
|
||||
if (ret < 0)
|
||||
goto out_cleanup_vblank;
|
||||
goto out_cleanup_hw;
|
||||
|
||||
i915_driver_register(dev_priv);
|
||||
|
||||
@@ -1336,8 +1351,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
return 0;
|
||||
|
||||
out_cleanup_vblank:
|
||||
drm_vblank_cleanup(&dev_priv->drm);
|
||||
out_cleanup_hw:
|
||||
i915_driver_cleanup_hw(dev_priv);
|
||||
out_cleanup_mmio:
|
||||
@@ -1360,7 +1373,7 @@ void i915_driver_unload(struct drm_device *dev)
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
|
||||
intel_fbdev_fini(dev);
|
||||
i915_driver_unregister(dev_priv);
|
||||
|
||||
if (i915_gem_suspend(dev_priv))
|
||||
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
|
||||
@@ -1371,10 +1384,6 @@ void i915_driver_unload(struct drm_device *dev)
|
||||
|
||||
intel_gvt_cleanup(dev_priv);
|
||||
|
||||
i915_driver_unregister(dev_priv);
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
intel_modeset_cleanup(dev);
|
||||
|
||||
/*
|
||||
@@ -1400,9 +1409,6 @@ void i915_driver_unload(struct drm_device *dev)
|
||||
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
|
||||
i915_reset_error_state(dev_priv);
|
||||
|
||||
/* Flush any outstanding unpin_work. */
|
||||
drain_workqueue(dev_priv->wq);
|
||||
|
||||
i915_gem_fini(dev_priv);
|
||||
intel_uc_fini_fw(dev_priv);
|
||||
intel_fbc_cleanup_cfb(dev_priv);
|
||||
@@ -1427,9 +1433,10 @@ static void i915_driver_release(struct drm_device *dev)
|
||||
|
||||
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_open(dev, file);
|
||||
ret = i915_gem_open(i915, file);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -1459,7 +1466,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
i915_gem_context_close(dev, file);
|
||||
i915_gem_context_close(file);
|
||||
i915_gem_release(dev, file);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
@@ -1825,7 +1832,8 @@ static int i915_resume_switcheroo(struct drm_device *dev)
|
||||
|
||||
/**
|
||||
* i915_reset - reset chip after a hang
|
||||
* @dev_priv: device private to reset
|
||||
* @i915: #drm_i915_private to reset
|
||||
* @flags: Instructions
|
||||
*
|
||||
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
|
||||
* on failure.
|
||||
@@ -1840,33 +1848,34 @@ static int i915_resume_switcheroo(struct drm_device *dev)
|
||||
* - re-init interrupt state
|
||||
* - re-init display
|
||||
*/
|
||||
void i915_reset(struct drm_i915_private *dev_priv)
|
||||
void i915_reset(struct drm_i915_private *i915, unsigned int flags)
|
||||
{
|
||||
struct i915_gpu_error *error = &dev_priv->gpu_error;
|
||||
struct i915_gpu_error *error = &i915->gpu_error;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
|
||||
|
||||
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
|
||||
return;
|
||||
|
||||
/* Clear any previous failed attempts at recovery. Time to try again. */
|
||||
if (!i915_gem_unset_wedged(dev_priv))
|
||||
if (!i915_gem_unset_wedged(i915))
|
||||
goto wakeup;
|
||||
|
||||
if (!(flags & I915_RESET_QUIET))
|
||||
dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
|
||||
error->reset_count++;
|
||||
|
||||
pr_notice("drm/i915: Resetting chip after gpu hang\n");
|
||||
disable_irq(dev_priv->drm.irq);
|
||||
ret = i915_gem_reset_prepare(dev_priv);
|
||||
disable_irq(i915->drm.irq);
|
||||
ret = i915_gem_reset_prepare(i915);
|
||||
if (ret) {
|
||||
DRM_ERROR("GPU recovery failed\n");
|
||||
intel_gpu_reset(dev_priv, ALL_ENGINES);
|
||||
intel_gpu_reset(i915, ALL_ENGINES);
|
||||
goto error;
|
||||
}
|
||||
|
||||
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
|
||||
ret = intel_gpu_reset(i915, ALL_ENGINES);
|
||||
if (ret) {
|
||||
if (ret != -ENODEV)
|
||||
DRM_ERROR("Failed to reset chip: %i\n", ret);
|
||||
@@ -1875,8 +1884,8 @@ void i915_reset(struct drm_i915_private *dev_priv)
|
||||
goto error;
|
||||
}
|
||||
|
||||
i915_gem_reset(dev_priv);
|
||||
intel_overlay_reset(dev_priv);
|
||||
i915_gem_reset(i915);
|
||||
intel_overlay_reset(i915);
|
||||
|
||||
/* Ok, now get things going again... */
|
||||
|
||||
@@ -1892,17 +1901,17 @@ void i915_reset(struct drm_i915_private *dev_priv)
|
||||
* was running at the time of the reset (i.e. we weren't VT
|
||||
* switched away).
|
||||
*/
|
||||
ret = i915_gem_init_hw(dev_priv);
|
||||
ret = i915_gem_init_hw(i915);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed hw init on reset %d\n", ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
i915_queue_hangcheck(dev_priv);
|
||||
i915_queue_hangcheck(i915);
|
||||
|
||||
finish:
|
||||
i915_gem_reset_finish(dev_priv);
|
||||
enable_irq(dev_priv->drm.irq);
|
||||
i915_gem_reset_finish(i915);
|
||||
enable_irq(i915->drm.irq);
|
||||
|
||||
wakeup:
|
||||
clear_bit(I915_RESET_HANDOFF, &error->flags);
|
||||
@@ -1910,10 +1919,74 @@ wakeup:
|
||||
return;
|
||||
|
||||
error:
|
||||
i915_gem_set_wedged(dev_priv);
|
||||
i915_gem_set_wedged(i915);
|
||||
i915_gem_retire_requests(i915);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_reset_engine - reset GPU engine to recover from a hang
|
||||
* @engine: engine to reset
|
||||
* @flags: options
|
||||
*
|
||||
* Reset a specific GPU engine. Useful if a hang is detected.
|
||||
* Returns zero on successful reset or otherwise an error code.
|
||||
*
|
||||
* Procedure is:
|
||||
* - identifies the request that caused the hang and it is dropped
|
||||
* - reset engine (which will force the engine to idle)
|
||||
* - re-init/configure engine
|
||||
*/
|
||||
int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
|
||||
{
|
||||
struct i915_gpu_error *error = &engine->i915->gpu_error;
|
||||
struct drm_i915_gem_request *active_request;
|
||||
int ret;
|
||||
|
||||
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
|
||||
|
||||
if (!(flags & I915_RESET_QUIET)) {
|
||||
dev_notice(engine->i915->drm.dev,
|
||||
"Resetting %s after gpu hang\n", engine->name);
|
||||
}
|
||||
error->reset_engine_count[engine->id]++;
|
||||
|
||||
active_request = i915_gem_reset_prepare_engine(engine);
|
||||
if (IS_ERR(active_request)) {
|
||||
DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
|
||||
ret = PTR_ERR(active_request);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
|
||||
if (ret) {
|
||||
/* If we fail here, we expect to fallback to a global reset */
|
||||
DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
|
||||
engine->name, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* The request that caused the hang is stuck on elsp, we know the
|
||||
* active request and can drop it, adjust head to skip the offending
|
||||
* request to resume executing remaining requests in the queue.
|
||||
*/
|
||||
i915_gem_reset_engine(engine, active_request);
|
||||
|
||||
/*
|
||||
* The engine and its registers (and workarounds in case of render)
|
||||
* have been reset to their default values. Follow the init_ring
|
||||
* process to program RING_MODE, HWSP and re-enable submission.
|
||||
*/
|
||||
ret = engine->init_hw(engine);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
out:
|
||||
i915_gem_reset_finish_engine(engine);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int i915_pm_suspend(struct device *kdev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(kdev);
|
||||
@@ -2657,6 +2730,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
|
||||
};
|
||||
|
||||
static struct drm_driver driver = {
|
||||
@@ -2665,12 +2740,11 @@ static struct drm_driver driver = {
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
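DRIVER_SYNCOBJ being set here (alongside dropping the set_busid hook in favour of the core default) is what exposes the sync object ioctls on i915. A small sketch of exercising them from userspace via libdrm's wrappers; the device path is an example and a libdrm new enough to ship drmSyncobjCreate() is assumed:

/* Sketch: create and destroy a DRM sync object against an i915 render node. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	uint32_t handle = 0;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* example path */

	if (fd < 0)
		return 1;

	if (drmSyncobjCreate(fd, 0, &handle) == 0) {
		printf("created syncobj handle %u\n", handle);
		drmSyncobjDestroy(fd, handle);
	}

	close(fd);
	return 0;
}
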
@@ -2683,7 +2757,6 @@ static struct drm_driver driver = {
|
||||
|
||||
.dumb_create = i915_gem_dumb_create,
|
||||
.dumb_map_offset = i915_gem_mmap_gtt,
|
||||
.dumb_destroy = drm_gem_dumb_destroy,
|
||||
.ioctls = i915_ioctls,
|
||||
.num_ioctls = ARRAY_SIZE(i915_ioctls),
|
||||
.fops = &i915_driver_fops,
@@ -80,8 +80,8 @@

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20170619"
#define DRIVER_TIMESTAMP	1497857498
#define DRIVER_DATE		"20170818"
#define DRIVER_TIMESTAMP	1503088845

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
|
||||
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
|
||||
@@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
|
||||
static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
|
||||
{
|
||||
uint_fixed_16_16_t fp;
|
||||
|
||||
@@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
|
||||
return fp;
|
||||
}
|
||||
|
||||
static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
|
||||
static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
|
||||
{
|
||||
return DIV_ROUND_UP(fp.val, 1 << 16);
|
||||
}
|
||||
|
||||
static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
|
||||
static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
|
||||
{
|
||||
return fp.val >> 16;
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
|
||||
static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
|
||||
uint_fixed_16_16_t min2)
|
||||
{
|
||||
uint_fixed_16_16_t min;
|
||||
@@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
|
||||
return min;
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
|
||||
static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
|
||||
uint_fixed_16_16_t max2)
|
||||
{
|
||||
uint_fixed_16_16_t max;
|
||||
@@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
|
||||
return max;
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
|
||||
{
|
||||
uint_fixed_16_16_t fp;
|
||||
WARN_ON(val >> 32);
|
||||
fp.val = clamp_t(uint32_t, val, 0, ~0);
|
||||
return fp;
|
||||
}
|
||||
|
||||
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
|
||||
uint_fixed_16_16_t d)
|
||||
{
|
||||
@@ -170,48 +178,30 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
|
||||
uint_fixed_16_16_t mul)
|
||||
{
|
||||
uint64_t intermediate_val;
|
||||
uint32_t result;
|
||||
|
||||
intermediate_val = (uint64_t) val * mul.val;
|
||||
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
|
||||
WARN_ON(intermediate_val >> 32);
|
||||
result = clamp_t(uint32_t, intermediate_val, 0, ~0);
|
||||
return result;
|
||||
return clamp_t(uint32_t, intermediate_val, 0, ~0);
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
|
||||
uint_fixed_16_16_t mul)
|
||||
{
|
||||
uint64_t intermediate_val;
|
||||
uint_fixed_16_16_t fp;
|
||||
|
||||
intermediate_val = (uint64_t) val.val * mul.val;
|
||||
intermediate_val = intermediate_val >> 16;
|
||||
WARN_ON(intermediate_val >> 32);
|
||||
fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
|
||||
return fp;
|
||||
return clamp_u64_to_fixed16(intermediate_val);
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
|
||||
static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
|
||||
{
|
||||
uint_fixed_16_16_t fp, res;
|
||||
|
||||
fp = u32_to_fixed_16_16(val);
|
||||
res.val = DIV_ROUND_UP(fp.val, d);
|
||||
return res;
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
|
||||
{
|
||||
uint_fixed_16_16_t res;
|
||||
uint64_t interm_val;
|
||||
|
||||
interm_val = (uint64_t)val << 16;
|
||||
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
|
||||
WARN_ON(interm_val >> 32);
|
||||
res.val = (uint32_t) interm_val;
|
||||
|
||||
return res;
|
||||
return clamp_u64_to_fixed16(interm_val);
|
||||
}
|
||||
|
||||
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
|
||||
@@ -225,16 +215,32 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
|
||||
return clamp_t(uint32_t, interm_val, 0, ~0);
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
|
||||
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
|
||||
uint_fixed_16_16_t mul)
|
||||
{
|
||||
uint64_t intermediate_val;
|
||||
uint_fixed_16_16_t fp;
|
||||
|
||||
intermediate_val = (uint64_t) val * mul.val;
|
||||
WARN_ON(intermediate_val >> 32);
|
||||
fp.val = (uint32_t) intermediate_val;
|
||||
return fp;
|
||||
return clamp_u64_to_fixed16(intermediate_val);
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
|
||||
uint_fixed_16_16_t add2)
|
||||
{
|
||||
uint64_t interm_sum;
|
||||
|
||||
interm_sum = (uint64_t) add1.val + add2.val;
|
||||
return clamp_u64_to_fixed16(interm_sum);
|
||||
}
|
||||
|
||||
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
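All of the renamed fixed-point helpers above now funnel their 64-bit intermediates through clamp_u64_to_fixed16(), saturating rather than silently truncating on overflow. A standalone sketch of the same Q16.16 arithmetic; the types and helpers are re-implemented here purely for illustration and are not taken from the header:

#include <stdint.h>
#include <stdio.h>

/* Q16.16 fixed point: upper 16 bits integer part, lower 16 bits fraction. */
typedef struct { uint32_t val; } fixed16_t;

static fixed16_t u32_to_fixed16(uint32_t v)
{
	fixed16_t fp = { .val = v << 16 };
	return fp;
}

/* Saturate a 64-bit intermediate into the 32-bit fixed-point field,
 * mirroring what clamp_u64_to_fixed16() does in the hunk above. */
static fixed16_t clamp_u64_to_fixed16(uint64_t v)
{
	fixed16_t fp = { .val = v > UINT32_MAX ? UINT32_MAX : (uint32_t)v };
	return fp;
}

static fixed16_t mul_fixed16(fixed16_t a, fixed16_t b)
{
	/* (a * b) >> 16 keeps the result in Q16.16 */
	return clamp_u64_to_fixed16(((uint64_t)a.val * b.val) >> 16);
}

int main(void)
{
	fixed16_t x = u32_to_fixed16(3);	/* 3.0 */
	fixed16_t half = { .val = 0x8000 };	/* 0.5 */
	fixed16_t r = mul_fixed16(x, half);	/* 1.5 -> 0x18000 */

	printf("result: %u.%04u\n", r.val >> 16,
	       (uint32_t)(((uint64_t)(r.val & 0xffff) * 10000) >> 16));
	return 0;
}
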
static inline const char *yesno(bool v)
|
||||
@@ -584,8 +590,7 @@ struct drm_i915_file_private {
|
||||
struct idr context_idr;
|
||||
|
||||
struct intel_rps_client {
|
||||
struct list_head link;
|
||||
unsigned boosts;
|
||||
atomic_t boosts;
|
||||
} rps;
|
||||
|
||||
unsigned int bsd_engine;
|
||||
@@ -597,7 +602,7 @@ struct drm_i915_file_private {
|
||||
* to limit the badly behaving clients access to gpu.
|
||||
*/
|
||||
#define I915_MAX_CLIENT_CONTEXT_BANS 3
|
||||
int context_bans;
|
||||
atomic_t context_bans;
|
||||
};
|
||||
|
||||
/* Used by dp and fdi links */
|
||||
@@ -641,6 +646,7 @@ struct intel_opregion {
|
||||
u32 swsci_sbcb_sub_functions;
|
||||
struct opregion_asle *asle;
|
||||
void *rvda;
|
||||
void *vbt_firmware;
|
||||
const void *vbt;
|
||||
u32 vbt_size;
|
||||
u32 *lid_state;
|
||||
@@ -710,11 +716,6 @@ struct drm_i915_display_funcs {
|
||||
void (*fdi_link_train)(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
|
||||
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags);
|
||||
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
|
||||
/* clock updates for mode set */
|
||||
/* cursor updates */
|
||||
@@ -753,6 +754,7 @@ struct intel_csr {
|
||||
func(has_csr); \
|
||||
func(has_ddi); \
|
||||
func(has_dp_mst); \
|
||||
func(has_reset_engine); \
|
||||
func(has_fbc); \
|
||||
func(has_fpga_dbg); \
|
||||
func(has_full_ppgtt); \
|
||||
@@ -917,6 +919,7 @@ struct i915_gpu_state {
|
||||
enum intel_engine_hangcheck_action hangcheck_action;
|
||||
struct i915_address_space *vm;
|
||||
int num_requests;
|
||||
u32 reset_count;
|
||||
|
||||
/* position of active request inside the ring */
|
||||
u32 rq_head, rq_post, rq_tail;
|
||||
@@ -1056,6 +1059,11 @@ struct intel_fbc {
|
||||
bool underrun_detected;
|
||||
struct work_struct underrun_work;
|
||||
|
||||
/*
|
||||
* Due to the atomic rules we can't access some structures without the
|
||||
* appropriate locking, so we cache information here in order to avoid
|
||||
* these problems.
|
||||
*/
|
||||
struct intel_fbc_state_cache {
|
||||
struct i915_vma *vma;
|
||||
|
||||
@@ -1077,6 +1085,13 @@ struct intel_fbc {
|
||||
} fb;
|
||||
} state_cache;
|
||||
|
||||
/*
|
||||
* This structure contains everything that's relevant to program the
|
||||
* hardware registers. When we want to figure out if we need to disable
|
||||
* and re-enable FBC for a new configuration we just check if there's
|
||||
* something different in the struct. The genx_fbc_activate functions
|
||||
* are supposed to read from it in order to program the registers.
|
||||
*/
|
||||
struct intel_fbc_reg_params {
|
||||
struct i915_vma *vma;
|
||||
|
||||
@@ -1149,11 +1164,11 @@ struct i915_psr {
|
||||
enum intel_pch {
|
||||
PCH_NONE = 0, /* No PCH present */
|
||||
PCH_IBX, /* Ibexpeak PCH */
|
||||
PCH_CPT, /* Cougarpoint PCH */
|
||||
PCH_LPT, /* Lynxpoint PCH */
|
||||
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
|
||||
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
|
||||
PCH_SPT, /* Sunrisepoint PCH */
|
||||
PCH_KBP, /* Kabypoint PCH */
|
||||
PCH_CNP, /* Cannonpoint PCH */
|
||||
PCH_KBP, /* Kaby Lake PCH */
|
||||
PCH_CNP, /* Cannon Lake PCH */
|
||||
PCH_NOP,
|
||||
};
|
||||
|
||||
@@ -1166,6 +1181,7 @@ enum intel_sbi_destination {
|
||||
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
|
||||
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
|
||||
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
|
||||
#define QUIRK_INCREASE_T12_DELAY (1<<6)
|
||||
|
||||
struct intel_fbdev;
|
||||
struct intel_fbc_work;
|
||||
@@ -1301,13 +1317,10 @@ struct intel_gen6_power_mgmt {
|
||||
int last_adj;
|
||||
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
|
||||
|
||||
spinlock_t client_lock;
|
||||
struct list_head clients;
|
||||
bool client_boost;
|
||||
|
||||
bool enabled;
|
||||
struct delayed_work autoenable_work;
|
||||
unsigned boosts;
|
||||
atomic_t num_waiters;
|
||||
atomic_t boosts;
|
||||
|
||||
/* manual wa residency calculations */
|
||||
struct intel_rps_ei ei;
|
||||
@@ -1383,12 +1396,23 @@ struct i915_power_well {
|
||||
bool hw_enabled;
|
||||
u64 domains;
|
||||
/* unique identifier for this power well */
|
||||
unsigned long id;
|
||||
enum i915_power_well_id id;
|
||||
/*
|
||||
* Arbitraty data associated with this power well. Platform and power
|
||||
* well specific.
|
||||
*/
|
||||
unsigned long data;
|
||||
union {
|
||||
struct {
|
||||
enum dpio_phy phy;
|
||||
} bxt;
|
||||
struct {
|
||||
/* Mask of pipes whose IRQ logic is backed by the pw */
|
||||
u8 irq_pipe_mask;
|
||||
/* The pw is backing the VGA functionality */
|
||||
bool has_vga:1;
|
||||
bool has_fuses:1;
|
||||
} hsw;
|
||||
};
|
||||
const struct i915_power_well_ops *ops;
|
||||
};
|
||||
|
||||
@@ -1505,6 +1529,8 @@ struct i915_gpu_error {
|
||||
/* Protected by the above dev->gpu_error.lock. */
|
||||
struct i915_gpu_state *first_error;
|
||||
|
||||
atomic_t pending_fb_pin;
|
||||
|
||||
unsigned long missed_irq_rings;
|
||||
|
||||
/**
|
||||
@@ -1550,6 +1576,12 @@ struct i915_gpu_error {
|
||||
* inspect the bit and do the reset directly, otherwise the worker
|
||||
* waits for the struct_mutex.
|
||||
*
|
||||
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
|
||||
* acquire the struct_mutex to reset an engine, we need an explicit
|
||||
* flag to prevent two concurrent reset attempts in the same engine.
|
||||
* As the number of engines continues to grow, allocate the flags from
|
||||
* the most significant bits.
|
||||
*
|
||||
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
|
||||
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
|
||||
* i915_gem_request_alloc(), this bit is checked and the sequence
|
||||
@@ -1558,7 +1590,12 @@ struct i915_gpu_error {
|
||||
unsigned long flags;
|
||||
#define I915_RESET_BACKOFF 0
|
||||
#define I915_RESET_HANDOFF 1
|
||||
#define I915_RESET_MODESET 2
|
||||
#define I915_WEDGED (BITS_PER_LONG - 1)
|
||||
#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
|
||||
|
||||
/** Number of times an engine has been reset */
|
||||
u32 reset_engine_count[I915_NUM_ENGINES];
|
||||
|
||||
/**
|
||||
* Waitqueue to signal when a hang is detected. Used to for waiters
|
||||
@@ -1869,6 +1906,7 @@ struct i915_workarounds {
|
||||
|
||||
struct i915_virtual_gpu {
|
||||
bool active;
|
||||
u32 caps;
|
||||
};
|
||||
|
||||
/* used in computing the new watermarks state */
|
||||
@@ -1888,6 +1926,24 @@ struct i915_oa_reg {
|
||||
u32 value;
|
||||
};
|
||||
|
||||
struct i915_oa_config {
|
||||
char uuid[UUID_STRING_LEN + 1];
|
||||
int id;
|
||||
|
||||
const struct i915_oa_reg *mux_regs;
|
||||
u32 mux_regs_len;
|
||||
const struct i915_oa_reg *b_counter_regs;
|
||||
u32 b_counter_regs_len;
|
||||
const struct i915_oa_reg *flex_regs;
|
||||
u32 flex_regs_len;
|
||||
|
||||
struct attribute_group sysfs_metric;
|
||||
struct attribute *attrs[2];
|
||||
struct device_attribute sysfs_metric_id;
|
||||
|
||||
atomic_t ref_count;
|
||||
};
|
||||
|
||||
struct i915_perf_stream;
|
||||
|
||||
/**
|
||||
@@ -2000,12 +2056,36 @@ struct i915_perf_stream {
|
||||
* type of configured stream.
|
||||
*/
|
||||
const struct i915_perf_stream_ops *ops;
|
||||
|
||||
/**
|
||||
* @oa_config: The OA configuration used by the stream.
|
||||
*/
|
||||
struct i915_oa_config *oa_config;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct i915_oa_ops - Gen specific implementation of an OA unit stream
|
||||
*/
|
||||
struct i915_oa_ops {
|
||||
/**
|
||||
* @is_valid_b_counter_reg: Validates register's address for
|
||||
* programming boolean counters for a particular platform.
|
||||
*/
|
||||
bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
|
||||
u32 addr);
|
||||
|
||||
/**
|
||||
* @is_valid_mux_reg: Validates register's address for programming mux
|
||||
* for a particular platform.
|
||||
*/
|
||||
bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
|
||||
|
||||
/**
|
||||
* @is_valid_flex_reg: Validates register's address for programming
|
||||
* flex EU filtering for a particular platform.
|
||||
*/
|
||||
bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
|
||||
|
||||
/**
|
||||
* @init_oa_buffer: Resets the head and tail pointers of the
|
||||
* circular buffer for periodic OA reports.
|
||||
@@ -2023,21 +2103,14 @@ struct i915_oa_ops {
|
||||
*/
|
||||
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
|
||||
|
||||
/**
|
||||
* @select_metric_set: The auto generated code that checks whether a
|
||||
* requested OA config is applicable to the system and if so sets up
|
||||
* the mux, oa and flex eu register config pointers according to the
|
||||
* current dev_priv->perf.oa.metrics_set.
|
||||
*/
|
||||
int (*select_metric_set)(struct drm_i915_private *dev_priv);
|
||||
|
||||
/**
|
||||
* @enable_metric_set: Selects and applies any MUX configuration to set
|
||||
* up the Boolean and Custom (B/C) counters that are part of the
|
||||
* counter reports being sampled. May apply system constraints such as
|
||||
* disabling EU clock gating as required.
|
||||
*/
|
||||
int (*enable_metric_set)(struct drm_i915_private *dev_priv);
|
||||
int (*enable_metric_set)(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_config *oa_config);
|
||||
|
||||
/**
|
||||
* @disable_metric_set: Remove system constraints associated with using
|
||||
@@ -2083,6 +2156,7 @@ struct drm_i915_private {
|
||||
|
||||
struct kmem_cache *objects;
|
||||
struct kmem_cache *vmas;
|
||||
struct kmem_cache *luts;
|
||||
struct kmem_cache *requests;
|
||||
struct kmem_cache *dependencies;
|
||||
struct kmem_cache *priorities;
|
||||
@@ -2133,9 +2207,6 @@ struct drm_i915_private {
|
||||
/* protects the irq masks */
|
||||
spinlock_t irq_lock;
|
||||
|
||||
/* protects the mmio flip data */
|
||||
spinlock_t mmio_flip_lock;
|
||||
|
||||
bool display_irqs_enabled;
|
||||
|
||||
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
|
||||
@@ -2236,18 +2307,10 @@ struct drm_i915_private {
|
||||
DECLARE_HASHTABLE(mm_structs, 7);
|
||||
struct mutex mm_lock;
|
||||
|
||||
/* The hw wants to have a stable context identifier for the lifetime
|
||||
* of the context (for OA, PASID, faults, etc). This is limited
|
||||
* in execlists to 21 bits.
|
||||
*/
|
||||
struct ida context_hw_ida;
|
||||
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
|
||||
|
||||
/* Kernel Modesetting */
|
||||
|
||||
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
|
||||
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
|
||||
wait_queue_head_t pending_flip_queue;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
|
||||
@@ -2303,11 +2366,9 @@ struct drm_i915_private {
|
||||
|
||||
struct drm_i915_gem_object *vlv_pctx;
|
||||
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
/* list of fbdev register on this device */
|
||||
struct intel_fbdev *fbdev;
|
||||
struct work_struct fbdev_suspend_work;
|
||||
#endif
|
||||
|
||||
struct drm_property *broadcast_rgb_property;
|
||||
struct drm_property *force_audio_property;
|
||||
@@ -2321,7 +2382,18 @@ struct drm_i915_private {
|
||||
*/
|
||||
struct mutex av_mutex;
|
||||
|
||||
struct list_head context_list;
|
||||
struct {
|
||||
struct list_head list;
|
||||
struct llist_head free_list;
|
||||
struct work_struct free_work;
|
||||
|
||||
/* The hw wants to have a stable context identifier for the
|
||||
* lifetime of the context (for OA, PASID, faults, etc).
|
||||
* This is limited in execlists to 21 bits.
|
||||
*/
|
||||
struct ida hw_ida;
|
||||
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
|
||||
} contexts;
|
||||
|
||||
u32 fdi_rx_config;
|
||||
|
||||
@@ -2399,10 +2471,32 @@ struct drm_i915_private {
|
||||
struct kobject *metrics_kobj;
|
||||
struct ctl_table_header *sysctl_header;
|
||||
|
||||
/*
|
||||
* Lock associated with adding/modifying/removing OA configs
|
||||
* in dev_priv->perf.metrics_idr.
|
||||
*/
|
||||
struct mutex metrics_lock;
|
||||
|
||||
/*
|
||||
* List of dynamic configurations, you need to hold
|
||||
* dev_priv->perf.metrics_lock to access it.
|
||||
*/
|
||||
struct idr metrics_idr;
|
||||
|
||||
/*
|
||||
* Lock associated with anything below within this structure
|
||||
* except exclusive_stream.
|
||||
*/
|
||||
struct mutex lock;
|
||||
struct list_head streams;
|
||||
|
||||
struct {
|
||||
/*
|
||||
* The stream currently using the OA unit. If accessed
|
||||
* outside a syscall associated to its file
|
||||
* descriptor, you need to hold
|
||||
* dev_priv->drm.struct_mutex.
|
||||
*/
|
||||
struct i915_perf_stream *exclusive_stream;
|
||||
|
||||
u32 specific_ctx_id;
|
||||
@@ -2421,16 +2515,7 @@ struct drm_i915_private {
|
||||
int period_exponent;
|
||||
int timestamp_frequency;
|
||||
|
||||
int metrics_set;
|
||||
|
||||
const struct i915_oa_reg *mux_regs[6];
|
||||
int mux_regs_lens[6];
|
||||
int n_mux_configs;
|
||||
|
||||
const struct i915_oa_reg *b_counter_regs;
|
||||
int b_counter_regs_len;
|
||||
const struct i915_oa_reg *flex_regs;
|
||||
int flex_regs_len;
|
||||
struct i915_oa_config test_config;
|
||||
|
||||
struct {
|
||||
struct i915_vma *vma;
|
||||
@@ -2517,7 +2602,6 @@ struct drm_i915_private {
|
||||
|
||||
struct i915_oa_ops ops;
|
||||
const struct i915_oa_format *oa_formats;
|
||||
int n_builtin_sets;
|
||||
} oa;
|
||||
} perf;
|
||||
|
||||
@@ -2996,16 +3080,17 @@ intel_info(const struct drm_i915_private *dev_priv)

#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_DEVICE_ID_MASK_EXT		0xff80
#define INTEL_PCH_DEVICE_ID_MASK		0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
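The device-ID mask grows from 0xff00 to 0xff80 (replacing the separate _EXT mask) because several PCH types share the top byte and are told apart only by bit 7 of the device ID, e.g. LPT 0x8c00 vs WPT 0x8c80 and SPT-LP 0x9D00 vs CNP-LP 0x9D80. A quick standalone check of that classification; the raw device IDs in the array are hypothetical examples:

#include <stdint.h>
#include <stdio.h>

/* Type values copied from the #define block above. */
#define PCH_ID_MASK		0xff80
#define SPT_DEVICE_ID_TYPE	0xA100
#define SPT_LP_DEVICE_ID_TYPE	0x9D00
#define CNP_DEVICE_ID_TYPE	0xA300
#define CNP_LP_DEVICE_ID_TYPE	0x9D80

int main(void)
{
	/* Example raw PCI device IDs; the low 7 bits vary per SKU
	 * (hypothetical values, for illustration only). */
	uint16_t ids[] = { 0xA152, 0x9D4E, 0xA30A, 0x9D84 };

	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		uint16_t type = ids[i] & PCH_ID_MASK;

		printf("0x%04x -> type 0x%04x (%s)\n", ids[i], type,
		       type == SPT_LP_DEVICE_ID_TYPE ? "SPT-LP" :
		       type == SPT_DEVICE_ID_TYPE    ? "SPT"    :
		       type == CNP_LP_DEVICE_ID_TYPE ? "CNP-LP" :
		       type == CNP_DEVICE_ID_TYPE    ? "CNP"    : "other");
	}
	return 0;
}
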
@@ -3020,9 +3105,11 @@ intel_info(const struct drm_i915_private *dev_priv)
|
||||
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
|
||||
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
|
||||
#define HAS_PCH_LPT_LP(dev_priv) \
|
||||
((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
|
||||
((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
|
||||
(dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
|
||||
#define HAS_PCH_LPT_H(dev_priv) \
|
||||
((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
|
||||
((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
|
||||
(dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
|
||||
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
|
||||
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
|
||||
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
|
||||
@@ -3088,7 +3175,13 @@ extern int i915_driver_load(struct pci_dev *pdev,
|
||||
extern void i915_driver_unload(struct drm_device *dev);
|
||||
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
|
||||
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
|
||||
extern void i915_reset(struct drm_i915_private *dev_priv);
|
||||
|
||||
#define I915_RESET_QUIET BIT(0)
|
||||
extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
|
||||
extern int i915_reset_engine(struct intel_engine_cs *engine,
|
||||
unsigned int flags);
|
||||
|
||||
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
|
||||
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
|
||||
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
|
||||
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
|
||||
@@ -3107,7 +3200,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
|
||||
void intel_hpd_init(struct drm_i915_private *dev_priv);
|
||||
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
|
||||
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
|
||||
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
|
||||
enum port intel_hpd_pin_to_port(enum hpd_pin pin);
|
||||
enum hpd_pin intel_hpd_pin(enum port port);
|
||||
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
|
||||
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
|
||||
|
||||
@@ -3276,6 +3370,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
|
||||
} while (flush_work(&i915->mm.free_work));
|
||||
}
|
||||
|
||||
static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to objects above (see i915_gem_drain_freed-objects), in
	 * general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we make an assumption that we only don't require more
	 * than 2 passes to catch all recursive RCU delayed work.
	 *
	 */
	int pass = 2;
	do {
		rcu_barrier();
		drain_workqueue(i915->wq);
	} while (--pass);
}
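Why one drain is not enough: a worker freed via RCU may requeue itself from its callback, so the helper drains, waits out a grace period, and drains once more. A toy userspace model of that two-pass argument; everything here is invented for illustration and is not the kernel API:

#include <stdio.h>

/* A work item may rearm itself once via an "RCU-deferred" slot, so a
 * single drain can leave work behind; two passes catch it. */
static int pending, deferred, rearms_left = 1;

static void run_one_work_item(void)
{
	pending--;
	if (rearms_left) {		/* callback queues a follow-up via "RCU" */
		rearms_left--;
		deferred++;
	}
}

static void drain(void)
{
	while (pending)
		run_one_work_item();
}

int main(void)
{
	pending = 3;
	for (int pass = 2; pass--; ) {
		drain();
		pending += deferred;	/* "rcu_barrier()": deferred work arrives */
		deferred = 0;
	}
	printf("left over after two passes: %d\n", pending);	/* 0 */
	return 0;
}
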
struct i915_vma * __must_check
|
||||
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||
const struct i915_ggtt_view *view,
|
||||
@@ -3461,11 +3575,22 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
|
||||
return READ_ONCE(error->reset_count);
|
||||
}
|
||||
|
||||
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
|
||||
struct intel_engine_cs *engine)
|
||||
{
|
||||
return READ_ONCE(error->reset_engine_count[engine->id]);
|
||||
}
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
|
||||
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_reset(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
|
||||
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
|
||||
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_reset_engine(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_request *request);
|
||||
|
||||
void i915_gem_init_mmio(struct drm_i915_private *i915);
|
||||
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
|
||||
@@ -3499,7 +3624,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
|
||||
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
|
||||
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
|
||||
int align);
|
||||
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
|
||||
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
|
||||
|
||||
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
|
||||
@@ -3530,41 +3655,26 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
|
||||
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages);
|
||||
|
||||
static inline struct i915_gem_context *
|
||||
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
|
||||
{
|
||||
return idr_find(&file_priv->context_idr, id);
|
||||
}
|
||||
|
||||
static inline struct i915_gem_context *
|
||||
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
|
||||
{
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
|
||||
|
||||
ctx = idr_find(&file_priv->context_idr, id);
|
||||
if (!ctx)
|
||||
return ERR_PTR(-ENOENT);
|
||||
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
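The lookup now acquires its reference under rcu_read_lock() with kref_get_unless_zero(): if the context is concurrently dropping its last reference, the get fails and NULL is returned instead of touching a dying object, so struct_mutex is no longer needed. A userspace analogue of the get-unless-zero part using C11 atomics; the object table and RCU section are elided, and this illustrates the idiom rather than the driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
	int id;
};

/* Take a reference only if the object is not already on its way to being
 * freed; in the kernel, the surrounding RCU read section is what keeps the
 * memory valid while we attempt the increment. */
static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already dying */
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		printf("obj %d: last reference dropped, would free here\n", o->id);
}

int main(void)
{
	struct obj live = { .refcount = 1, .id = 1 };
	struct obj dying = { .refcount = 0, .id = 2 };

	printf("live:  %s\n", get_unless_zero(&live) ? "got ref" : "missed");
	printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "missed");
	put(&live);	/* drop the reference we just took */
	put(&live);	/* drop the original reference */
	return 0;
}
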
static inline struct i915_gem_context *
|
||||
i915_gem_context_get(struct i915_gem_context *ctx)
|
||||
{
|
||||
kref_get(&ctx->ref);
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
|
||||
{
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
kref_put(&ctx->ref, i915_gem_context_free);
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct mutex *lock = &ctx->i915->drm.struct_mutex;
|
||||
|
||||
if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
|
||||
mutex_unlock(lock);
|
||||
}
|
||||
|
||||
static inline struct intel_timeline *
|
||||
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine)
|
||||
@@ -3577,6 +3687,10 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
|
||||
|
||||
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
|
||||
struct i915_gem_context *ctx,
|
||||
uint32_t *reg_state);
|
||||
@@ -4064,6 +4178,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
|
||||
|
||||
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
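The new early-out clamps absurdly large timeouts before the nanoseconds-to-jiffies conversion can overflow on configurations where NSEC_PER_SEC is not a multiple of HZ. A plain-C sketch of the same guard with assumed constants; HZ=300 and the 32-bit MAX_JIFFY_OFFSET value are illustrative choices, not taken from any particular build:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define HZ		 300ULL
#define NSEC_PER_SEC	 1000000000ULL
#define MAX_JIFFY_OFFSET ((0x7fffffffULL >> 1) - 1)	/* 32-bit long value */

static uint64_t ns_to_jiffies_timeout(uint64_t n)
{
	/* same shape as the guarded helper above: clamp before converting */
	if (NSEC_PER_SEC % HZ && n / NSEC_PER_SEC >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return (n * HZ) / NSEC_PER_SEC + 1;	/* naive conversion, safe now */
}

int main(void)
{
	printf("%" PRIu64 "\n", ns_to_jiffies_timeout(1000000));	/* ~1 ms */
	printf("%" PRIu64 "\n", ns_to_jiffies_timeout(UINT64_MAX));	/* clamped */
	return 0;
}
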
@@ -4210,10 +4329,11 @@ int remap_io_mapping(struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long pfn, unsigned long size,
|
||||
struct io_mapping *iomap);
|
||||
|
||||
static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
|
||||
static inline bool
|
||||
intel_engine_can_store_dword(struct intel_engine_cs *engine)
|
||||
{
|
||||
return (obj->cache_level != I915_CACHE_NONE ||
|
||||
HAS_LLC(to_i915(obj->base.dev)));
|
||||
return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
|
||||
engine->class);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -52,7 +52,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
|
||||
if (obj->cache_dirty)
|
||||
return false;
|
||||
|
||||
if (!obj->cache_coherent)
|
||||
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
|
||||
return true;
|
||||
|
||||
return obj->pin_display;
|
||||
@@ -253,7 +253,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
|
||||
|
||||
if (needs_clflush &&
|
||||
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
|
||||
!obj->cache_coherent)
|
||||
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
|
||||
drm_clflush_sg(pages);
|
||||
|
||||
__start_cpu_write(obj);
|
||||
@@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
|
||||
*/
|
||||
if (rps) {
|
||||
if (INTEL_GEN(rq->i915) >= 6)
|
||||
gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
|
||||
gen6_rps_boost(rq, rps);
|
||||
else
|
||||
rps = NULL;
|
||||
}
|
||||
@@ -399,22 +399,6 @@ out:
|
||||
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
|
||||
i915_gem_request_retire_upto(rq);
|
||||
|
||||
if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
|
||||
/* The GPU is now idle and this client has stalled.
|
||||
* Since no other client has submitted a request in the
|
||||
* meantime, assume that this client is the only one
|
||||
* supplying work to the GPU but is unable to keep that
|
||||
* work supplied because it is waiting. Since the GPU is
|
||||
* then never kept fully busy, RPS autoclocking will
|
||||
* keep the clocks relatively low, causing further delays.
|
||||
* Compensate by giving the synchronous client credit for
|
||||
* a waitboost next time.
|
||||
*/
|
||||
spin_lock(&rq->i915->rps.client_lock);
|
||||
list_del_init(&rps->link);
|
||||
spin_unlock(&rq->i915->rps.client_lock);
|
||||
}
|
||||
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@@ -577,46 +561,6 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
|
||||
return &fpriv->rps;
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
|
||||
int align)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (align > obj->base.size)
|
||||
return -EINVAL;
|
||||
|
||||
if (obj->ops == &i915_gem_phys_ops)
|
||||
return 0;
|
||||
|
||||
if (obj->mm.madv != I915_MADV_WILLNEED)
|
||||
return -EFAULT;
|
||||
|
||||
if (obj->base.filp == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
ret = i915_gem_object_unbind(obj);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
|
||||
if (obj->mm.pages)
|
||||
return -EBUSY;
|
||||
|
||||
GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
|
||||
obj->ops = &i915_gem_phys_ops;
|
||||
|
||||
ret = i915_gem_object_pin_pages(obj);
|
||||
if (ret)
|
||||
goto err_xfer;
|
||||
|
||||
return 0;
|
||||
|
||||
err_xfer:
|
||||
obj->ops = &i915_gem_object_ops;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_pwrite *args,
|
||||
@@ -856,7 +800,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
|
||||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, false);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
@@ -908,7 +853,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
|
||||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
|
||||
ret = i915_gem_object_set_to_cpu_domain(obj, true);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
@@ -2756,34 +2702,38 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool ban_context(const struct i915_gem_context *ctx)
|
||||
static bool ban_context(const struct i915_gem_context *ctx,
|
||||
unsigned int score)
|
||||
{
|
||||
return (i915_gem_context_is_bannable(ctx) &&
|
||||
ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
|
||||
score >= CONTEXT_SCORE_BAN_THRESHOLD);
|
||||
}
|
||||
|
||||
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
|
||||
{
|
||||
ctx->guilty_count++;
|
||||
ctx->ban_score += CONTEXT_SCORE_GUILTY;
|
||||
if (ban_context(ctx))
|
||||
i915_gem_context_set_banned(ctx);
|
||||
unsigned int score;
|
||||
bool banned;
|
||||
|
||||
atomic_inc(&ctx->guilty_count);
|
||||
|
||||
score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
|
||||
banned = ban_context(ctx, score);
|
||||
DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
|
||||
ctx->name, ctx->ban_score,
|
||||
yesno(i915_gem_context_is_banned(ctx)));
|
||||
|
||||
if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
|
||||
ctx->name, score, yesno(banned));
|
||||
if (!banned)
|
||||
return;
|
||||
|
||||
ctx->file_priv->context_bans++;
|
||||
DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
|
||||
ctx->name, ctx->file_priv->context_bans);
|
||||
	i915_gem_context_set_banned(ctx);
	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
		atomic_inc(&ctx->file_priv->context_bans);
		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
	}
}
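Guilt and ban accounting switches to atomics (atomic_inc, atomic_add_return) so it can run from reset paths without struct_mutex, and the ban decision is made on the value returned by the atomic add rather than a separately re-read score. A compact sketch of that scheme; the score and threshold constants are placeholders, not the driver's values:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SCORE_GUILTY	10	/* placeholder values */
#define BAN_THRESHOLD	40

struct ctx {
	atomic_int guilty_count;
	atomic_int ban_score;
	bool bannable;
};

/* Add the penalty atomically and decide on the returned value, so two
 * concurrent hang reports cannot both read a stale score and miss the
 * threshold. */
static bool mark_guilty(struct ctx *c)
{
	int score;

	atomic_fetch_add(&c->guilty_count, 1);
	score = atomic_fetch_add(&c->ban_score, SCORE_GUILTY) + SCORE_GUILTY;
	return c->bannable && score >= BAN_THRESHOLD;
}

int main(void)
{
	struct ctx c = { .bannable = true };

	for (int hang = 1; hang <= 5; hang++)
		printf("hang %d -> banned: %s\n", hang,
		       mark_guilty(&c) ? "yes" : "no");
	return 0;
}
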
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
|
||||
{
|
||||
ctx->active_count++;
|
||||
atomic_inc(&ctx->active_count);
|
||||
}
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
@@ -2832,46 +2782,62 @@ static bool engine_stalled(struct intel_engine_cs *engine)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure irq handler finishes, and not run again.
|
||||
* Also return the active request so that we only search for it once.
|
||||
*/
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_gem_request *request = NULL;
|
||||
|
||||
/* Prevent the signaler thread from updating the request
|
||||
* state (by calling dma_fence_signal) as we are processing
|
||||
* the reset. The write from the GPU of the seqno is
|
||||
* asynchronous and the signaler thread may see a different
|
||||
* value to us and declare the request complete, even though
|
||||
* the reset routine have picked that request as the active
|
||||
* (incomplete) request. This conflict is not handled
|
||||
* gracefully!
|
||||
*/
|
||||
kthread_park(engine->breadcrumbs.signaler);
|
||||
|
||||
/* Prevent request submission to the hardware until we have
|
||||
* completed the reset in i915_gem_reset_finish(). If a request
|
||||
* is completed by one engine, it may then queue a request
|
||||
* to a second via its engine->irq_tasklet *just* as we are
|
||||
* calling engine->init_hw() and also writing the ELSP.
|
||||
* Turning off the engine->irq_tasklet until the reset is over
|
||||
* prevents the race.
|
||||
*/
|
||||
tasklet_kill(&engine->irq_tasklet);
|
||||
tasklet_disable(&engine->irq_tasklet);
|
||||
|
||||
if (engine->irq_seqno_barrier)
|
||||
engine->irq_seqno_barrier(engine);
|
||||
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request && request->fence.error == -EIO)
|
||||
request = ERR_PTR(-EIO); /* Previous reset failed! */
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
struct drm_i915_gem_request *request;
|
||||
enum intel_engine_id id;
|
||||
int err = 0;
|
||||
|
||||
/* Ensure irq handler finishes, and not run again. */
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
/* Prevent the signaler thread from updating the request
|
||||
* state (by calling dma_fence_signal) as we are processing
|
||||
* the reset. The write from the GPU of the seqno is
|
||||
* asynchronous and the signaler thread may see a different
|
||||
* value to us and declare the request complete, even though
|
||||
* the reset routine have picked that request as the active
|
||||
* (incomplete) request. This conflict is not handled
|
||||
* gracefully!
|
||||
*/
|
||||
kthread_park(engine->breadcrumbs.signaler);
|
||||
|
||||
/* Prevent request submission to the hardware until we have
|
||||
* completed the reset in i915_gem_reset_finish(). If a request
|
||||
* is completed by one engine, it may then queue a request
|
||||
* to a second via its engine->irq_tasklet *just* as we are
|
||||
* calling engine->init_hw() and also writing the ELSP.
|
||||
* Turning off the engine->irq_tasklet until the reset is over
|
||||
* prevents the race.
|
||||
*/
|
||||
tasklet_kill(&engine->irq_tasklet);
|
||||
tasklet_disable(&engine->irq_tasklet);
|
||||
|
||||
if (engine->irq_seqno_barrier)
|
||||
engine->irq_seqno_barrier(engine);
|
||||
|
||||
if (engine_stalled(engine)) {
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request && request->fence.error == -EIO)
|
||||
err = -EIO; /* Previous reset failed! */
|
||||
request = i915_gem_reset_prepare_engine(engine);
|
||||
if (IS_ERR(request)) {
|
||||
err = PTR_ERR(request);
|
||||
continue;
|
||||
}
|
||||
|
||||
engine->hangcheck.active_request = request;
|
||||
}
|
||||
|
||||
i915_gem_revoke_fences(dev_priv);
|
||||
@@ -2921,12 +2887,11 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
|
||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
||||
}
|
||||
|
||||
/* Returns true if the request was guilty of hang */
|
||||
static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
|
||||
/* Returns the request if it was guilty of the hang */
|
||||
static struct drm_i915_gem_request *
|
||||
i915_gem_reset_request(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_request *request)
|
||||
{
|
||||
/* Read once and return the resolution */
|
||||
const bool guilty = engine_stalled(request->engine);
|
||||
|
||||
/* The guilty request will get skipped on a hung engine.
|
||||
*
|
||||
* Users of client default contexts do not rely on logical
|
||||
@@ -2948,29 +2913,47 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
|
||||
* subsequent hangs.
|
||||
*/
|
||||
|
||||
if (guilty) {
|
||||
if (engine_stalled(engine)) {
|
||||
i915_gem_context_mark_guilty(request->ctx);
|
||||
skip_request(request);
|
||||
} else {
|
||||
i915_gem_context_mark_innocent(request->ctx);
|
||||
dma_fence_set_error(&request->fence, -EAGAIN);
|
||||
}
|
||||
|
||||
return guilty;
|
||||
}
|
||||
|
||||
static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request && i915_gem_reset_request(request)) {
|
||||
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
|
||||
engine->name, request->global_seqno);
|
||||
|
||||
/* If this context is now banned, skip all pending requests. */
|
||||
if (i915_gem_context_is_banned(request->ctx))
|
||||
engine_skip_context(request);
|
||||
} else {
|
||||
/*
|
||||
* Since this is not the hung engine, it may have advanced
|
||||
* since the hang declaration. Double check by refinding
|
||||
* the active request at the time of the reset.
|
||||
*/
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request) {
|
||||
i915_gem_context_mark_innocent(request->ctx);
|
||||
dma_fence_set_error(&request->fence, -EAGAIN);
|
||||
|
||||
/* Rewind the engine to replay the incomplete rq */
|
||||
spin_lock_irq(&engine->timeline->lock);
|
||||
request = list_prev_entry(request, link);
|
||||
if (&request->link == &engine->timeline->requests)
|
||||
request = NULL;
|
||||
spin_unlock_irq(&engine->timeline->lock);
|
||||
}
|
||||
}
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
void i915_gem_reset_engine(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_request *request)
|
||||
{
|
||||
engine->irq_posted = 0;
|
||||
|
||||
if (request)
|
||||
request = i915_gem_reset_request(engine, request);
|
||||
|
||||
if (request) {
|
||||
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
|
||||
engine->name, request->global_seqno);
|
||||
}
|
||||
|
||||
/* Setup the CS to resume from the breadcrumb of the hung request */
|
||||
@@ -2989,7 +2972,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
i915_gem_reset_engine(engine);
|
||||
i915_gem_reset_engine(engine, engine->hangcheck.active_request);
|
||||
ctx = fetch_and_zero(&engine->last_retired_context);
|
||||
if (ctx)
|
||||
engine->context_unpin(engine, ctx);
|
||||
@@ -3005,6 +2988,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
|
||||
{
|
||||
tasklet_enable(&engine->irq_tasklet);
|
||||
kthread_unpark(engine->breadcrumbs.signaler);
|
||||
}
|
||||
|
||||
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
@@ -3013,13 +3002,14 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
tasklet_enable(&engine->irq_tasklet);
|
||||
kthread_unpark(engine->breadcrumbs.signaler);
|
||||
engine->hangcheck.active_request = NULL;
|
||||
i915_gem_reset_finish_engine(engine);
|
||||
}
|
||||
}
|
||||
|
||||
static void nop_submit_request(struct drm_i915_gem_request *request)
|
||||
{
|
||||
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
|
||||
dma_fence_set_error(&request->fence, -EIO);
|
||||
i915_gem_request_submit(request);
|
||||
intel_engine_init_global_seqno(request->engine, request->global_seqno);
|
||||
@@ -3041,16 +3031,10 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
|
||||
/* Mark all executing requests as skipped */
|
||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
||||
list_for_each_entry(request, &engine->timeline->requests, link)
|
||||
dma_fence_set_error(&request->fence, -EIO);
|
||||
if (!i915_gem_request_completed(request))
|
||||
dma_fence_set_error(&request->fence, -EIO);
|
||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
||||
|
||||
/* Mark all pending requests as complete so that any concurrent
|
||||
* (lockless) lookup doesn't try and wait upon the request as we
|
||||
* reset it.
|
||||
*/
|
||||
intel_engine_init_global_seqno(engine,
|
||||
intel_engine_last_submit(engine));
|
||||
|
||||
/*
|
||||
* Clear the execlists queue up before freeing the requests, as those
|
||||
* are the ones that keep the context and ringbuffer backing objects
|
||||
@@ -3071,7 +3055,21 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
|
||||
engine->execlist_first = NULL;
|
||||
|
||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
||||
|
||||
/* The port is checked prior to scheduling a tasklet, but
|
||||
* just in case we have suspended the tasklet to do the
|
||||
* wedging make sure that when it wakes, it decides there
|
||||
* is no work to do by clearing the irq_posted bit.
|
||||
*/
|
||||
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
|
||||
}
|
||||
|
||||
/* Mark all pending requests as complete so that any concurrent
|
||||
* (lockless) lookup doesn't try and wait upon the request as we
|
||||
* reset it.
|
||||
*/
|
||||
intel_engine_init_global_seqno(engine,
|
||||
intel_engine_last_submit(engine));
|
||||
}
|
||||
|
||||
static int __i915_gem_set_wedged_BKL(void *data)
|
||||
@@ -3083,25 +3081,15 @@ static int __i915_gem_set_wedged_BKL(void *data)
|
||||
for_each_engine(engine, i915, id)
|
||||
engine_set_wedged(engine);
|
||||
|
||||
set_bit(I915_WEDGED, &i915->gpu_error.flags);
|
||||
wake_up_all(&i915->gpu_error.reset_queue);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
|
||||
|
||||
/* Retire completed requests first so the list of inflight/incomplete
|
||||
* requests is accurate and we don't try and mark successful requests
|
||||
* as in error during __i915_gem_set_wedged_BKL().
|
||||
*/
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
|
||||
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
|
||||
|
||||
i915_gem_context_lost(dev_priv);
|
||||
|
||||
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
|
||||
}
|
||||
|
||||
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
|
||||
@@ -3156,6 +3144,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
|
||||
* context and do not require stop_machine().
|
||||
*/
|
||||
intel_engines_reset_default_submission(i915);
|
||||
i915_gem_contexts_lost(i915);
|
||||
|
||||
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
|
||||
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
|
||||
@@ -3253,25 +3242,33 @@ out_rearm:
|
||||
|
||||
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(gem->dev);
|
||||
struct drm_i915_gem_object *obj = to_intel_bo(gem);
|
||||
struct drm_i915_file_private *fpriv = file->driver_priv;
|
||||
struct i915_vma *vma, *vn;
|
||||
struct i915_lut_handle *lut, *ln;
|
||||
|
||||
mutex_lock(&obj->base.dev->struct_mutex);
|
||||
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
|
||||
if (vma->vm->file == fpriv)
|
||||
mutex_lock(&i915->drm.struct_mutex);
|
||||
|
||||
list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
|
||||
struct i915_gem_context *ctx = lut->ctx;
|
||||
struct i915_vma *vma;
|
||||
|
||||
if (ctx->file_priv != fpriv)
|
||||
continue;
|
||||
|
||||
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
|
||||
|
||||
if (!i915_vma_is_ggtt(vma))
|
||||
i915_vma_close(vma);
|
||||
|
||||
vma = obj->vma_hashed;
|
||||
if (vma && vma->ctx->file_priv == fpriv)
|
||||
i915_vma_unlink_ctx(vma);
|
||||
list_del(&lut->obj_link);
|
||||
list_del(&lut->ctx_link);
|
||||
|
||||
if (i915_gem_object_is_active(obj) &&
|
||||
!i915_gem_object_has_active_reference(obj)) {
|
||||
i915_gem_object_set_active_reference(obj);
|
||||
i915_gem_object_get(obj);
|
||||
kmem_cache_free(i915->luts, lut);
|
||||
__i915_gem_object_release_unless_active(obj);
|
||||
}
|
||||
mutex_unlock(&obj->base.dev->struct_mutex);
|
||||
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
}
|
||||
|
||||
static unsigned long to_wait_timeout(s64 timeout_ns)
|
||||
@@ -3297,7 +3294,7 @@ static unsigned long to_wait_timeout(s64 timeout_ns)
|
||||
* -ERESTARTSYS: signal interrupted the wait
|
||||
* -ENONENT: object doesn't exist
|
||||
* Also possible, but rare:
|
||||
* -EAGAIN: GPU wedged
|
||||
* -EAGAIN: incomplete, restart syscall
|
||||
* -ENOMEM: damn
|
||||
* -ENODEV: Internal IRQ fail
|
||||
* -E?: The add request failed
|
||||
@@ -3345,6 +3342,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
*/
|
||||
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
|
||||
args->timeout_ns = 0;
|
||||
|
||||
/* Asked to wait beyond the jiffie/scheduler precision? */
|
||||
if (ret == -ETIME && args->timeout_ns)
|
||||
ret = -EAGAIN;
|
||||
}
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
@@ -3686,8 +3687,7 @@ restart:
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link)
|
||||
vma->node.color = cache_level;
|
||||
obj->cache_level = cache_level;
|
||||
obj->cache_coherent = i915_gem_object_is_coherent(obj);
|
||||
i915_gem_object_set_cache_coherency(obj, cache_level);
|
||||
obj->cache_dirty = true; /* Always invalidate stale cachelines */
|
||||
|
||||
return 0;
|
||||
@@ -4260,6 +4260,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
INIT_LIST_HEAD(&obj->global_link);
|
||||
INIT_LIST_HEAD(&obj->userfault_link);
|
||||
INIT_LIST_HEAD(&obj->vma_list);
|
||||
INIT_LIST_HEAD(&obj->lut_list);
|
||||
INIT_LIST_HEAD(&obj->batch_pool_link);
|
||||
|
||||
obj->ops = ops;
|
||||
@@ -4292,6 +4293,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct address_space *mapping;
|
||||
unsigned int cache_level;
|
||||
gfp_t mask;
|
||||
int ret;
|
||||
|
||||
@@ -4330,7 +4332,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
|
||||
if (HAS_LLC(dev_priv)) {
|
||||
if (HAS_LLC(dev_priv))
|
||||
/* On some devices, we can have the GPU use the LLC (the CPU
|
||||
* cache) for about a 10% performance improvement
|
||||
* compared to uncached. Graphics requests other than
|
||||
@@ -4343,12 +4345,11 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
* However, we maintain the display planes as UC, and so
|
||||
* need to rebind when first used as such.
|
||||
*/
|
||||
obj->cache_level = I915_CACHE_LLC;
|
||||
} else
|
||||
obj->cache_level = I915_CACHE_NONE;
|
||||
cache_level = I915_CACHE_LLC;
|
||||
else
|
||||
cache_level = I915_CACHE_NONE;
|
||||
|
||||
obj->cache_coherent = i915_gem_object_is_coherent(obj);
|
||||
obj->cache_dirty = !obj->cache_coherent;
|
||||
i915_gem_object_set_cache_coherency(obj, cache_level);
|
||||
|
||||
trace_i915_gem_object_create(obj);
|
||||
|
||||
@@ -4503,8 +4504,8 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
lockdep_assert_held(&obj->base.dev->struct_mutex);
|
||||
|
||||
GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
|
||||
if (i915_gem_object_is_active(obj))
|
||||
if (!i915_gem_object_has_active_reference(obj) &&
|
||||
i915_gem_object_is_active(obj))
|
||||
i915_gem_object_set_active_reference(obj);
|
||||
else
|
||||
i915_gem_object_put(obj);
|
||||
@@ -4565,7 +4566,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
|
||||
goto err_unlock;
|
||||
|
||||
assert_kernel_context_is_current(dev_priv);
|
||||
i915_gem_context_lost(dev_priv);
|
||||
i915_gem_contexts_lost(dev_priv);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_guc_suspend(dev_priv);
|
||||
@@ -4579,8 +4580,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
|
||||
while (flush_delayed_work(&dev_priv->gt.idle_work))
|
||||
;
|
||||
|
||||
i915_gem_drain_freed_objects(dev_priv);
|
||||
|
||||
/* Assert that we sucessfully flushed all the work and
|
||||
* reset the GPU back to its idle, low power state.
|
||||
*/
|
||||
@@ -4812,7 +4811,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
ret = i915_gem_context_init(dev_priv);
|
||||
ret = i915_gem_contexts_init(dev_priv);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
@@ -4898,12 +4897,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
|
||||
if (!dev_priv->vmas)
|
||||
goto err_objects;
|
||||
|
||||
dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
|
||||
if (!dev_priv->luts)
|
||||
goto err_vmas;
|
||||
|
||||
dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
|
||||
SLAB_HWCACHE_ALIGN |
|
||||
SLAB_RECLAIM_ACCOUNT |
|
||||
SLAB_TYPESAFE_BY_RCU);
|
||||
if (!dev_priv->requests)
|
||||
goto err_vmas;
|
||||
goto err_luts;
|
||||
|
||||
dev_priv->dependencies = KMEM_CACHE(i915_dependency,
|
||||
SLAB_HWCACHE_ALIGN |
|
||||
@@ -4922,7 +4925,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
|
||||
if (err)
|
||||
goto err_priorities;
|
||||
|
||||
INIT_LIST_HEAD(&dev_priv->context_list);
|
||||
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
|
||||
init_llist_head(&dev_priv->mm.free_list);
|
||||
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
|
||||
@@ -4936,8 +4938,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
|
||||
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
|
||||
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
|
||||
|
||||
init_waitqueue_head(&dev_priv->pending_flip_queue);
|
||||
|
||||
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
|
||||
|
||||
spin_lock_init(&dev_priv->fb_tracking.lock);
|
||||
@@ -4950,6 +4950,8 @@ err_dependencies:
|
||||
kmem_cache_destroy(dev_priv->dependencies);
|
||||
err_requests:
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
err_luts:
|
||||
kmem_cache_destroy(dev_priv->luts);
|
||||
err_vmas:
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
err_objects:
|
||||
@@ -4972,6 +4974,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
|
||||
kmem_cache_destroy(dev_priv->priorities);
|
||||
kmem_cache_destroy(dev_priv->dependencies);
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
kmem_cache_destroy(dev_priv->luts);
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
kmem_cache_destroy(dev_priv->objects);
|
||||
|
||||
@@ -5038,15 +5041,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
|
||||
list_for_each_entry(request, &file_priv->mm.request_list, client_link)
|
||||
request->file_priv = NULL;
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
if (!list_empty(&file_priv->rps.link)) {
|
||||
spin_lock(&to_i915(dev)->rps.client_lock);
|
||||
list_del(&file_priv->rps.link);
|
||||
spin_unlock(&to_i915(dev)->rps.client_lock);
|
||||
}
|
||||
}
|
||||
|
||||
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
|
||||
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv;
|
||||
int ret;
|
||||
@@ -5058,16 +5055,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
|
||||
return -ENOMEM;
|
||||
|
||||
file->driver_priv = file_priv;
|
||||
file_priv->dev_priv = to_i915(dev);
|
||||
file_priv->dev_priv = i915;
|
||||
file_priv->file = file;
|
||||
INIT_LIST_HEAD(&file_priv->rps.link);
|
||||
|
||||
spin_lock_init(&file_priv->mm.lock);
|
||||
INIT_LIST_HEAD(&file_priv->mm.request_list);
|
||||
|
||||
file_priv->bsd_engine = -1;
|
||||
|
||||
ret = i915_gem_context_open(dev, file);
|
||||
ret = i915_gem_context_open(i915, file);
|
||||
if (ret)
|
||||
kfree(file_priv);
|
||||
|
||||
@@ -5311,6 +5307,64 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
|
||||
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
|
||||
{
|
||||
struct sg_table *pages;
|
||||
int err;
|
||||
|
||||
if (align > obj->base.size)
|
||||
return -EINVAL;
|
||||
|
||||
if (obj->ops == &i915_gem_phys_ops)
|
||||
return 0;
|
||||
|
||||
if (obj->ops != &i915_gem_object_ops)
|
||||
return -EINVAL;
|
||||
|
||||
err = i915_gem_object_unbind(obj);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mutex_lock(&obj->mm.lock);
|
||||
|
||||
if (obj->mm.madv != I915_MADV_WILLNEED) {
|
||||
err = -EFAULT;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (obj->mm.quirked) {
|
||||
err = -EFAULT;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (obj->mm.mapping) {
|
||||
err = -EBUSY;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
pages = obj->mm.pages;
|
||||
obj->ops = &i915_gem_phys_ops;
|
||||
|
||||
err = ____i915_gem_object_get_pages(obj);
|
||||
if (err)
|
||||
goto err_xfer;
|
||||
|
||||
/* Perma-pin (until release) the physical set of pages */
|
||||
__i915_gem_object_pin_pages(obj);
|
||||
|
||||
if (!IS_ERR_OR_NULL(pages))
|
||||
i915_gem_object_ops.put_pages(obj, pages);
|
||||
mutex_unlock(&obj->mm.lock);
|
||||
return 0;
|
||||
|
||||
err_xfer:
|
||||
obj->ops = &i915_gem_object_ops;
|
||||
obj->mm.pages = pages;
|
||||
err_unlock:
|
||||
mutex_unlock(&obj->mm.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
||||
#include "selftests/scatterlist.c"
|
||||
#include "selftests/mock_gem_device.c"
|
||||
|
@@ -139,7 +139,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
|
||||
* snooping behaviour occurs naturally as the result of our domain
|
||||
* tracking.
|
||||
*/
|
||||
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
|
||||
if (!(flags & I915_CLFLUSH_FORCE) &&
|
||||
obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
|
||||
return false;
|
||||
|
||||
trace_i915_gem_object_clflush(obj);
|
||||
|
@@ -93,81 +93,37 @@
|
||||
|
||||
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
|
||||
|
||||
/* Initial size (as log2) to preallocate the handle->object hashtable */
|
||||
#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
|
||||
|
||||
static void resize_vma_ht(struct work_struct *work)
|
||||
static void lut_close(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct i915_gem_context_vma_lut *lut =
|
||||
container_of(work, typeof(*lut), resize);
|
||||
unsigned int bits, new_bits, size, i;
|
||||
struct hlist_head *new_ht;
|
||||
struct i915_lut_handle *lut, *ln;
|
||||
struct radix_tree_iter iter;
|
||||
void __rcu **slot;
|
||||
|
||||
GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
|
||||
|
||||
bits = 1 + ilog2(4*lut->ht_count/3 + 1);
|
||||
new_bits = min_t(unsigned int,
|
||||
max(bits, VMA_HT_BITS),
|
||||
sizeof(unsigned int) * BITS_PER_BYTE - 1);
|
||||
if (new_bits == lut->ht_bits)
|
||||
goto out;
|
||||
|
||||
new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!new_ht)
|
||||
new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
|
||||
if (!new_ht)
|
||||
/* Pretend resize succeeded and stop calling us for a bit! */
|
||||
goto out;
|
||||
|
||||
size = BIT(lut->ht_bits);
|
||||
for (i = 0; i < size; i++) {
|
||||
struct i915_vma *vma;
|
||||
struct hlist_node *tmp;
|
||||
|
||||
hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
|
||||
hlist_add_head(&vma->ctx_node,
|
||||
&new_ht[hash_32(vma->ctx_handle,
|
||||
new_bits)]);
|
||||
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
|
||||
list_del(&lut->obj_link);
|
||||
kmem_cache_free(ctx->i915->luts, lut);
|
||||
}
|
||||
|
||||
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
|
||||
struct i915_vma *vma = rcu_dereference_raw(*slot);
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
|
||||
|
||||
if (!i915_vma_is_ggtt(vma))
|
||||
i915_vma_close(vma);
|
||||
|
||||
__i915_gem_object_release_unless_active(obj);
|
||||
}
|
||||
kvfree(lut->ht);
|
||||
lut->ht = new_ht;
|
||||
lut->ht_bits = new_bits;
|
||||
out:
|
||||
smp_store_release(&lut->ht_size, BIT(bits));
|
||||
GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
|
||||
}
|
||||
|
||||
static void vma_lut_free(struct i915_gem_context *ctx)
|
||||
static void i915_gem_context_free(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
|
||||
unsigned int i, size;
|
||||
|
||||
if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
|
||||
cancel_work_sync(&lut->resize);
|
||||
|
||||
size = BIT(lut->ht_bits);
|
||||
for (i = 0; i < size; i++) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
|
||||
vma->obj->vma_hashed = NULL;
|
||||
vma->ctx = NULL;
|
||||
i915_vma_put(vma);
|
||||
}
|
||||
}
|
||||
kvfree(lut->ht);
|
||||
}
|
||||
|
||||
void i915_gem_context_free(struct kref *ctx_ref)
|
||||
{
|
||||
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
trace_i915_context_free(ctx);
|
||||
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
|
||||
|
||||
vma_lut_free(ctx);
|
||||
i915_ppgtt_put(ctx->ppgtt);
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
@@ -188,15 +144,64 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
||||
|
||||
list_del(&ctx->link);
|
||||
|
||||
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
|
||||
kfree(ctx);
|
||||
ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
|
||||
kfree_rcu(ctx, rcu);
|
||||
}

static void contexts_free(struct drm_i915_private *i915)
{
struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;

lockdep_assert_held(&i915->drm.struct_mutex);

llist_for_each_entry_safe(ctx, cn, freed, free_link)
i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
struct llist_node *freed;

lockdep_assert_held(&i915->drm.struct_mutex);

freed = llist_del_first(&i915->contexts.free_list);
if (!freed)
return;

ctx = container_of(freed, typeof(*ctx), free_link);
i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), contexts.free_work);

mutex_lock(&i915->drm.struct_mutex);
contexts_free(i915);
mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
struct drm_i915_private *i915 = ctx->i915;

trace_i915_context_free(ctx);
if (llist_add(&ctx->free_link, &i915->contexts.free_list))
queue_work(i915->wq, &i915->contexts.free_work);
}

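i915_gem_context_free() above asserts struct_mutex, which the kref release path cannot assume to hold, so released contexts are pushed onto a lock-free llist and reaped by a worker that does take the mutex. A minimal stand-alone sketch of that pattern follows; the names (deferred_ctx, ctx_release, free_worker, free_list) are hypothetical and the lock/free in the worker is a placeholder for whatever the real teardown needs.

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_ctx {
	struct llist_node free_link;
};

static LLIST_HEAD(free_list);

static void free_worker(struct work_struct *work)
{
	struct deferred_ctx *ctx, *cn;

	/* sleeping context: take whatever lock the real free requires here */
	llist_for_each_entry_safe(ctx, cn, llist_del_all(&free_list), free_link)
		kfree(ctx);
}

static DECLARE_WORK(free_work, free_worker);

static void ctx_release(struct deferred_ctx *ctx)
{
	/* llist_add() returns true only when the list was empty, so exactly
	 * one worker is queued per batch of releases */
	if (llist_add(&ctx->free_link, &free_list))
		schedule_work(&free_work);
}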
static void context_close(struct i915_gem_context *ctx)
|
||||
{
|
||||
i915_gem_context_set_closed(ctx);
|
||||
|
||||
lut_close(ctx);
|
||||
if (ctx->ppgtt)
|
||||
i915_ppgtt_close(&ctx->ppgtt->base);
|
||||
|
||||
ctx->file_priv = ERR_PTR(-EBADF);
|
||||
i915_gem_context_put(ctx);
|
||||
}
|
||||
@@ -205,7 +210,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = ida_simple_get(&dev_priv->context_hw_ida,
|
||||
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
|
||||
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
|
||||
if (ret < 0) {
|
||||
/* Contexts are only released when no longer active.
|
||||
@@ -213,7 +218,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
|
||||
* stale contexts and try again.
|
||||
*/
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
ret = ida_simple_get(&dev_priv->context_hw_ida,
|
||||
ret = ida_simple_get(&dev_priv->contexts.hw_ida,
|
||||
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@@ -265,20 +270,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
kref_init(&ctx->ref);
|
||||
list_add_tail(&ctx->link, &dev_priv->context_list);
|
||||
list_add_tail(&ctx->link, &dev_priv->contexts.list);
|
||||
ctx->i915 = dev_priv;
|
||||
ctx->priority = I915_PRIORITY_NORMAL;
|
||||
|
||||
ctx->vma_lut.ht_bits = VMA_HT_BITS;
|
||||
ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
|
||||
BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
|
||||
ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
|
||||
sizeof(*ctx->vma_lut.ht),
|
||||
GFP_KERNEL);
|
||||
if (!ctx->vma_lut.ht)
|
||||
goto err_out;
|
||||
|
||||
INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
|
||||
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
|
||||
INIT_LIST_HEAD(&ctx->handles_list);
|
||||
|
||||
/* Default context will never have a file_priv */
|
||||
ret = DEFAULT_CONTEXT_HANDLE;
|
||||
@@ -328,8 +325,6 @@ err_pid:
|
||||
put_pid(ctx->pid);
|
||||
idr_remove(&file_priv->context_idr, ctx->user_handle);
|
||||
err_lut:
|
||||
kvfree(ctx->vma_lut.ht);
|
||||
err_out:
|
||||
context_close(ctx);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
@@ -354,6 +349,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
|
||||
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
/* Reap the most stale context */
|
||||
contexts_free_first(dev_priv);
|
||||
|
||||
ctx = __create_hw_context(dev_priv, file_priv);
|
||||
if (IS_ERR(ctx))
|
||||
return ctx;
|
||||
@@ -418,7 +416,7 @@ out:
|
||||
return ctx;
|
||||
}
|
||||
|
||||
int i915_gem_context_init(struct drm_i915_private *dev_priv)
|
||||
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
@@ -427,6 +425,10 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
|
||||
if (WARN_ON(dev_priv->kernel_context))
|
||||
return 0;
|
||||
|
||||
INIT_LIST_HEAD(&dev_priv->contexts.list);
|
||||
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
|
||||
init_llist_head(&dev_priv->contexts.free_list);
|
||||
|
||||
if (intel_vgpu_active(dev_priv) &&
|
||||
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
|
||||
if (!i915.enable_execlists) {
|
||||
@@ -437,7 +439,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
|
||||
|
||||
/* Using the simple ida interface, the max is limited by sizeof(int) */
|
||||
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
|
||||
ida_init(&dev_priv->context_hw_ida);
|
||||
ida_init(&dev_priv->contexts.hw_ida);
|
||||
|
||||
ctx = i915_gem_create_context(dev_priv, NULL);
|
||||
if (IS_ERR(ctx)) {
|
||||
@@ -463,7 +465,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
|
||||
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
@@ -484,7 +486,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
|
||||
if (!i915.enable_execlists) {
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link) {
|
||||
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
|
||||
if (!i915_gem_context_is_default(ctx))
|
||||
continue;
|
||||
|
||||
@@ -503,18 +505,20 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_context_fini(struct drm_i915_private *dev_priv)
|
||||
void i915_gem_contexts_fini(struct drm_i915_private *i915)
|
||||
{
|
||||
struct i915_gem_context *dctx = dev_priv->kernel_context;
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
lockdep_assert_held(&i915->drm.struct_mutex);
|
||||
|
||||
GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));
|
||||
/* Keep the context so that we can free it immediately ourselves */
|
||||
ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
|
||||
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
|
||||
context_close(ctx);
|
||||
i915_gem_context_free(ctx);
|
||||
|
||||
context_close(dctx);
|
||||
dev_priv->kernel_context = NULL;
|
||||
|
||||
ida_destroy(&dev_priv->context_hw_ida);
|
||||
/* Must free all deferred contexts (via flush_workqueue) first */
|
||||
ida_destroy(&i915->contexts.hw_ida);
|
||||
}
|
||||
|
||||
static int context_idr_cleanup(int id, void *p, void *data)
|
||||
@@ -525,32 +529,32 @@ static int context_idr_cleanup(int id, void *p, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
|
||||
int i915_gem_context_open(struct drm_i915_private *i915,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
struct i915_gem_context *ctx;
|
||||
|
||||
idr_init(&file_priv->context_idr);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ctx = i915_gem_create_context(to_i915(dev), file_priv);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
|
||||
|
||||
mutex_lock(&i915->drm.struct_mutex);
|
||||
ctx = i915_gem_create_context(i915, file_priv);
|
||||
mutex_unlock(&i915->drm.struct_mutex);
|
||||
if (IS_ERR(ctx)) {
|
||||
idr_destroy(&file_priv->context_idr);
|
||||
return PTR_ERR(ctx);
|
||||
}
|
||||
|
||||
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
|
||||
void i915_gem_context_close(struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
|
||||
lockdep_assert_held(&dev->struct_mutex);
|
||||
lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
|
||||
|
||||
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
|
||||
idr_destroy(&file_priv->context_idr);
|
||||
@@ -925,7 +929,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
|
||||
|
||||
static bool client_is_banned(struct drm_i915_file_private *file_priv)
|
||||
{
|
||||
return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
|
||||
return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
|
||||
}
|
||||
|
||||
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
|
||||
@@ -982,20 +986,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
|
||||
return -ENOENT;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
|
||||
if (IS_ERR(ctx)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return PTR_ERR(ctx);
|
||||
}
|
||||
if (!ctx)
|
||||
return -ENOENT;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
__destroy_hw_context(ctx, file_priv);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
|
||||
out:
|
||||
i915_gem_context_put(ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1005,17 +1008,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
struct drm_i915_gem_context_param *args = data;
|
||||
struct i915_gem_context *ctx;
|
||||
int ret;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
int ret = 0;
|
||||
|
||||
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
|
||||
if (IS_ERR(ctx)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return PTR_ERR(ctx);
|
||||
}
|
||||
if (!ctx)
|
||||
return -ENOENT;
|
||||
|
||||
args->size = 0;
|
||||
switch (args->param) {
|
||||
@@ -1043,8 +1040,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
i915_gem_context_put(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1056,15 +1053,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
||||
struct i915_gem_context *ctx;
|
||||
int ret;
|
||||
|
||||
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
|
||||
if (!ctx)
|
||||
return -ENOENT;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
|
||||
if (IS_ERR(ctx)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return PTR_ERR(ctx);
|
||||
}
|
||||
goto out;
|
||||
|
||||
switch (args->param) {
|
||||
case I915_CONTEXT_PARAM_BAN_PERIOD:
|
||||
@@ -1102,6 +1097,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
out:
|
||||
i915_gem_context_put(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1116,27 +1113,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
|
||||
if (args->flags || args->pad)
|
||||
return -EINVAL;
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = -ENOENT;
|
||||
rcu_read_lock();
|
||||
ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
|
||||
if (!ctx)
|
||||
goto out;
|
||||
|
||||
ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
|
||||
if (IS_ERR(ctx)) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return PTR_ERR(ctx);
|
||||
}
|
||||
/*
|
||||
* We opt for unserialised reads here. This may result in tearing
|
||||
* in the extremely unlikely event of a GPU hang on this context
|
||||
* as we are querying them. If we need that extra layer of protection,
|
||||
* we should wrap the hangstats with a seqlock.
|
||||
*/
|
||||
|
||||
if (capable(CAP_SYS_ADMIN))
|
||||
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
|
||||
else
|
||||
args->reset_count = 0;
|
||||
|
||||
args->batch_active = ctx->guilty_count;
|
||||
args->batch_pending = ctx->active_count;
|
||||
args->batch_active = atomic_read(&ctx->guilty_count);
|
||||
args->batch_pending = atomic_read(&ctx->active_count);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
ret = 0;
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
|
||||
|
@@ -27,6 +27,7 @@
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/radix-tree.h>
|
||||
|
||||
struct pid;
|
||||
|
||||
@@ -86,6 +87,7 @@ struct i915_gem_context {
|
||||
|
||||
/** link: place with &drm_i915_private.context_list */
|
||||
struct list_head link;
|
||||
struct llist_node free_link;
|
||||
|
||||
/**
|
||||
* @ref: reference count
|
||||
@@ -98,6 +100,11 @@ struct i915_gem_context {
|
||||
*/
|
||||
struct kref ref;
|
||||
|
||||
/**
|
||||
* @rcu: rcu_head for deferred freeing.
|
||||
*/
|
||||
struct rcu_head rcu;
|
||||
|
||||
/**
|
||||
* @flags: small set of booleans
|
||||
*/
|
||||
@@ -143,32 +150,6 @@ struct i915_gem_context {
|
||||
/** ggtt_offset_bias: placement restriction for context objects */
|
||||
u32 ggtt_offset_bias;
|
||||
|
||||
struct i915_gem_context_vma_lut {
|
||||
/** ht_size: last request size to allocate the hashtable for. */
|
||||
unsigned int ht_size;
|
||||
#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
|
||||
/** ht_bits: real log2(size) of hashtable. */
|
||||
unsigned int ht_bits;
|
||||
/** ht_count: current number of entries inside the hashtable */
|
||||
unsigned int ht_count;
|
||||
|
||||
/** ht: the array of buckets comprising the simple hashtable */
|
||||
struct hlist_head *ht;
|
||||
|
||||
/**
|
||||
* resize: After an execbuf completes, we check the load factor
|
||||
* of the hashtable. If the hashtable is too full, or too empty,
|
||||
* we schedule a task to resize the hashtable. During the
|
||||
* resize, the entries are moved between different buckets and
|
||||
* so we cannot simultaneously read the hashtable as it is
|
||||
* being resized (unlike rhashtable). Therefore we treat the
|
||||
* active work as a strong barrier, pausing a subsequent
|
||||
* execbuf to wait for the resize worker to complete, if
|
||||
* required.
|
||||
*/
|
||||
struct work_struct resize;
|
||||
} vma_lut;
|
||||
|
||||
/** engine: per-engine logical HW state */
|
||||
struct intel_context {
|
||||
struct i915_vma *state;
|
||||
@@ -185,20 +166,32 @@ struct i915_gem_context {
|
||||
u32 desc_template;
|
||||
|
||||
/** guilty_count: How many times this context has caused a GPU hang. */
|
||||
unsigned int guilty_count;
|
||||
atomic_t guilty_count;
|
||||
/**
|
||||
* @active_count: How many times this context was active during a GPU
|
||||
* hang, but did not cause it.
|
||||
*/
|
||||
unsigned int active_count;
|
||||
atomic_t active_count;
|
||||
|
||||
#define CONTEXT_SCORE_GUILTY 10
|
||||
#define CONTEXT_SCORE_BAN_THRESHOLD 40
|
||||
/** ban_score: Accumulated score of all hangs caused by this context. */
|
||||
int ban_score;
|
||||
atomic_t ban_score;
|
||||
|
||||
/** remap_slice: Bitmask of cache lines that need remapping */
|
||||
u8 remap_slice;
|
||||
|
||||
/** handles_vma: rbtree to look up our context specific obj/vma for
|
||||
* the user handle. (user handles are per fd, but the binding is
|
||||
* per vm, which may be one per context or shared with the global GTT)
|
||||
*/
|
||||
struct radix_tree_root handles_vma;
|
||||
|
||||
/** handles_list: reverse list of all the rbtree entries in use for
|
||||
* this context, which allows us to free all the allocations on
|
||||
* context close.
|
||||
*/
|
||||
struct list_head handles_list;
|
||||
};
|
||||
|
||||
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
|
||||
@@ -273,14 +266,18 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
|
||||
}
|
||||
|
||||
/* i915_gem_context.c */
|
||||
int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_context_fini(struct drm_i915_private *dev_priv);
|
||||
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
|
||||
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
|
||||
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
|
||||
|
||||
int i915_gem_context_open(struct drm_i915_private *i915,
|
||||
struct drm_file *file);
|
||||
void i915_gem_context_close(struct drm_file *file);
|
||||
|
||||
int i915_switch_context(struct drm_i915_gem_request *req);
|
||||
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_context_free(struct kref *ctx_ref);
|
||||
|
||||
void i915_gem_context_release(struct kref *ctx_ref);
|
||||
struct i915_gem_context *
|
||||
i915_gem_context_create_gvt(struct drm_device *dev);
|
||||
|
||||
@@ -295,4 +292,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
||||
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
|
||||
static inline struct i915_gem_context *
|
||||
i915_gem_context_get(struct i915_gem_context *ctx)
|
||||
{
|
||||
kref_get(&ctx->ref);
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
|
||||
{
|
||||
kref_put(&ctx->ref, i915_gem_context_release);
|
||||
}
|
||||
|
||||
#endif /* !__I915_GEM_CONTEXT_H__ */
|
||||
|
@@ -318,8 +318,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)
if (vma->exec_flags &&
*vma->exec_flags & EXEC_OBJECT_PINNED)
ret = -EINVAL;
break;
}

File diff suppressed because it is too large
@@ -144,9 +144,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

if (intel_vgpu_active(dev_priv)) {
/* emulation is too hard */
/* GVT-g has no support for 32bit ppgtt */
has_full_ppgtt = false;
has_full_48bit_ppgtt = false;
has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}

if (!has_aliasing_ppgtt)
@@ -180,10 +180,15 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}

if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
if (has_full_48bit_ppgtt)
return 3;

if (has_full_ppgtt)
return 2;
}

return has_aliasing_ppgtt ? 1 : 0;
}

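The sanitized value returned above selects the level of ppgtt support the driver will actually use. A purely illustrative sketch of what the 0-3 values mean, read off the code in this hunk; the enum below does not exist in the driver and is only an assumption-labelled summary.

/* illustrative only: these enumerators are not defined by the driver */
enum sanitized_ppgtt_mode {
	PPGTT_DISABLED	= 0,	/* no per-process GTT, GGTT only */
	PPGTT_ALIASING	= 1,	/* aliasing ppgtt layered over the GGTT */
	PPGTT_FULL	= 2,	/* full ppgtt, 32bit address space */
	PPGTT_FULL_48BIT = 3,	/* full 48bit ppgtt (4-level page tables) */
};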
static int ppgtt_bind_vma(struct i915_vma *vma,
|
||||
@@ -207,8 +212,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
|
||||
if (vma->obj->gt_ro)
|
||||
pte_flags |= PTE_READ_ONLY;
|
||||
|
||||
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
|
||||
cache_level, pte_flags);
|
||||
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -907,37 +911,35 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
|
||||
}
|
||||
|
||||
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
|
||||
struct sg_table *pages,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 unused)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
struct sgt_dma iter = {
|
||||
.sg = pages->sgl,
|
||||
.sg = vma->pages->sgl,
|
||||
.dma = sg_dma_address(iter.sg),
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
if (!(flags & I915_CLFLUSH_FORCE) &&
obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
return false;

trace_i915_gem_object_clflush(obj);

||||
struct sg_table *pages,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 unused)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
struct sgt_dma iter = {
|
||||
.sg = pages->sgl,
|
||||
.sg = vma->pages->sgl,
|
||||
.dma = sg_dma_address(iter.sg),
|
||||
.max = iter.dma + iter.sg->length,
|
||||
};
|
||||
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
|
||||
struct gen8_insert_pte idx = gen8_insert_pte(start);
|
||||
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
|
||||
|
||||
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
|
||||
&idx, cache_level))
|
||||
@@ -1621,13 +1623,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
|
||||
}
|
||||
|
||||
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *pages,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
unsigned first_entry = start >> PAGE_SHIFT;
|
||||
unsigned first_entry = vma->node.start >> PAGE_SHIFT;
|
||||
unsigned act_pt = first_entry / GEN6_PTES;
|
||||
unsigned act_pte = first_entry % GEN6_PTES;
|
||||
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
|
||||
@@ -1635,7 +1636,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
|
||||
gen6_pte_t *vaddr;
|
||||
|
||||
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
|
||||
iter.sg = pages->sgl;
|
||||
iter.sg = vma->pages->sgl;
|
||||
iter.dma = sg_dma_address(iter.sg);
|
||||
iter.max = iter.dma + iter.sg->length;
|
||||
do {
|
||||
@@ -2090,8 +2091,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
|
||||
}
|
||||
|
||||
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level level,
|
||||
u32 unused)
|
||||
{
|
||||
@@ -2102,8 +2102,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
dma_addr_t addr;
|
||||
|
||||
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
|
||||
gtt_entries += start >> PAGE_SHIFT;
|
||||
for_each_sgt_dma(addr, sgt_iter, st)
|
||||
gtt_entries += vma->node.start >> PAGE_SHIFT;
|
||||
for_each_sgt_dma(addr, sgt_iter, vma->pages)
|
||||
gen8_set_pte(gtt_entries++, pte_encode | addr);
|
||||
|
||||
wmb();
|
||||
@@ -2137,17 +2137,16 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
|
||||
* mapped BAR (dev_priv->mm.gtt->gtt).
|
||||
*/
|
||||
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level level,
|
||||
u32 flags)
|
||||
{
|
||||
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
|
||||
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
|
||||
unsigned int i = start >> PAGE_SHIFT;
|
||||
unsigned int i = vma->node.start >> PAGE_SHIFT;
|
||||
struct sgt_iter iter;
|
||||
dma_addr_t addr;
|
||||
for_each_sgt_dma(addr, iter, st)
|
||||
for_each_sgt_dma(addr, iter, vma->pages)
|
||||
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
|
||||
wmb();
|
||||
|
||||
@@ -2229,8 +2228,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
|
||||
|
||||
struct insert_entries {
|
||||
struct i915_address_space *vm;
|
||||
struct sg_table *st;
|
||||
u64 start;
|
||||
struct i915_vma *vma;
|
||||
enum i915_cache_level level;
|
||||
};
|
||||
|
||||
@@ -2238,19 +2236,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
|
||||
{
|
||||
struct insert_entries *arg = _arg;
|
||||
|
||||
gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
|
||||
gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
|
||||
bxt_vtd_ggtt_wa(arg->vm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level level,
|
||||
u32 unused)
|
||||
{
|
||||
struct insert_entries arg = { vm, st, start, level };
|
||||
struct insert_entries arg = { vm, vma, level };
|
||||
|
||||
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
|
||||
}
|
||||
@@ -2316,15 +2313,15 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
|
||||
}
|
||||
|
||||
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *pages,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 unused)
|
||||
{
|
||||
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
|
||||
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
|
||||
|
||||
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
|
||||
intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
|
||||
flags);
|
||||
}
|
||||
|
||||
static void i915_ggtt_clear_range(struct i915_address_space *vm,
|
||||
@@ -2353,8 +2350,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
|
||||
pte_flags |= PTE_READ_ONLY;
|
||||
|
||||
intel_runtime_pm_get(i915);
|
||||
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
|
||||
cache_level, pte_flags);
|
||||
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
|
||||
intel_runtime_pm_put(i915);
|
||||
|
||||
/*
|
||||
@@ -2407,16 +2403,13 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
|
||||
goto err_pages;
|
||||
}
|
||||
|
||||
appgtt->base.insert_entries(&appgtt->base,
|
||||
vma->pages, vma->node.start,
|
||||
cache_level, pte_flags);
|
||||
appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
|
||||
pte_flags);
|
||||
}
|
||||
|
||||
if (flags & I915_VMA_GLOBAL_BIND) {
|
||||
intel_runtime_pm_get(i915);
|
||||
vma->vm->insert_entries(vma->vm,
|
||||
vma->pages, vma->node.start,
|
||||
cache_level, pte_flags);
|
||||
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
|
||||
intel_runtime_pm_put(i915);
|
||||
}
|
||||
|
||||
@@ -2749,6 +2742,24 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* XXX: spec is unclear if this is still needed for CNL+ */
|
||||
if (!USES_PPGTT(dev_priv)) {
|
||||
I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
|
||||
return;
|
||||
}
|
||||
|
||||
I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
|
||||
I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
|
||||
I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
|
||||
I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
|
||||
I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
|
||||
I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
|
||||
I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
|
||||
I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
|
||||
}
|
||||
|
||||
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
|
||||
* bits. When using advanced contexts each context stores its own PAT, but
|
||||
* writing this data shouldn't be harmful even in those cases. */
|
||||
@@ -2863,7 +2874,9 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
|
||||
|
||||
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 10)
|
||||
cnl_setup_private_ppat(dev_priv);
|
||||
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
|
||||
chv_setup_private_ppat(dev_priv);
|
||||
else
|
||||
bdw_setup_private_ppat(dev_priv);
|
||||
@@ -3145,7 +3158,9 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
|
||||
ggtt->base.closed = false;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 8) {
|
||||
if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 10)
|
||||
cnl_setup_private_ppat(dev_priv);
|
||||
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
|
||||
chv_setup_private_ppat(dev_priv);
|
||||
else
|
||||
bdw_setup_private_ppat(dev_priv);
|
||||
|
@@ -313,8 +313,7 @@ struct i915_address_space {
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags);
|
||||
void (*insert_entries)(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
u64 start,
|
||||
struct i915_vma *vma,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags);
|
||||
void (*cleanup)(struct i915_address_space *vm);
|
||||
|
@@ -174,6 +174,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
|
||||
phys_addr_t size)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned int cache_level;
|
||||
|
||||
GEM_BUG_ON(!size);
|
||||
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
|
||||
@@ -190,9 +191,9 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
|
||||
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
|
||||
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
|
||||
obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
|
||||
obj->cache_coherent = i915_gem_object_is_coherent(obj);
|
||||
obj->cache_dirty = !obj->cache_coherent;
|
||||
|
||||
cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
|
||||
i915_gem_object_set_cache_coherency(obj, cache_level);
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
drivers/gpu/drm/i915/i915_gem_object.c (new file, 48 lines)
@@ -0,0 +1,48 @@
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#include "i915_drv.h"
#include "i915_gem_object.h"

/**
* Mark up the object's coherency levels for a given cache_level
* @obj: #drm_i915_gem_object
* @cache_level: cache level
*/
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level)
{
obj->cache_level = cache_level;

if (cache_level != I915_CACHE_NONE)
obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
I915_BO_CACHE_COHERENT_FOR_WRITE);
else if (HAS_LLC(to_i915(obj->base.dev)))
obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
else
obj->cache_coherent = 0;

obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
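
For context, a minimal sketch of how a buffer-object constructor is expected to use the new helper, based on the i915_gem_object_create_internal() and _i915_gem_object_create_stolen() hunks elsewhere in this diff (obj and i915 are whatever the caller already has in scope):

	/* Sketch only: pick the cache level the platform supports, then let
	 * the helper derive cache_coherent and cache_dirty instead of
	 * open-coding the two fields at every allocation site. */
	unsigned int cache_level;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);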
@@ -33,8 +33,24 @@

#include <drm/i915_drm.h>

#include "i915_gem_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
* struct i915_lut_handle tracks the fast lookups from handle to vma used
* for execbuf. Although we use a radixtree for that mapping, in order to
* remove them as the object or context is closed, we need a secondary list
* and a translation entry (i915_lut_handle).
*/
struct i915_lut_handle {
struct list_head obj_link;
struct list_head ctx_link;
struct i915_gem_context *ctx;
u32 handle;
};

struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)

@@ -86,7 +102,15 @@ struct drm_i915_gem_object {
* They are also added to @vma_list for easy iteration.
*/
struct rb_root vma_tree;
struct i915_vma *vma_hashed;

/**
* @lut_list: List of vma lookup entries in use for this object.
*
* If this object is closed, we need to remove all of its VMA from
* the fast lookup index in associated contexts; @lut_list provides
* this translation from object to context->handles_vma.
*/
struct list_head lut_list;

/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;

@@ -118,8 +142,10 @@ struct drm_i915_gem_object {
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
unsigned int cache_coherent:1;

atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */

@@ -391,6 +417,8 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif
@@ -213,6 +213,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
cond_resched();
}

/* Check we are idle before we fiddle with hw state! */
GEM_BUG_ON(!intel_engine_is_idle(engine));
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));

/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
tl->seqno = seqno;

@@ -240,27 +244,60 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
return reset_all_global_seqno(dev_priv, seqno - 1);
}

static int reserve_seqno(struct intel_engine_cs *engine)
static void mark_busy(struct drm_i915_private *i915)
{
if (i915->gt.awake)
return;

GEM_BUG_ON(!i915->gt.active_requests);

intel_runtime_pm_get_noresume(i915);
i915->gt.awake = true;

intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);

queue_delayed_work(i915->wq,
&i915->gt.retire_work,
round_jiffies_up_relative(HZ));
}

static int reserve_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
u32 active = ++engine->timeline->inflight_seqnos;
u32 seqno = engine->timeline->seqno;
int ret;

/* Reservation is fine until we need to wrap around */
if (likely(!add_overflows(seqno, active)))
return 0;

ret = reset_all_global_seqno(engine->i915, 0);
if (ret) {
engine->timeline->inflight_seqnos--;
return ret;
if (unlikely(add_overflows(seqno, active))) {
ret = reset_all_global_seqno(i915, 0);
if (ret) {
engine->timeline->inflight_seqnos--;
return ret;
}
}

if (!i915->gt.active_requests++)
mark_busy(i915);

return 0;
}

static void unreserve_seqno(struct intel_engine_cs *engine)
static void unreserve_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;

if (!--i915->gt.active_requests) {
/* Cancel the mark_busy() from our reserve_engine() */
GEM_BUG_ON(!i915->gt.awake);
mod_delayed_work(i915->wq,
&i915->gt.idle_work,
msecs_to_jiffies(100));
}

GEM_BUG_ON(!engine->timeline->inflight_seqnos);
engine->timeline->inflight_seqnos--;
}

@@ -329,13 +366,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->link);
spin_unlock_irq(&engine->timeline->lock);

if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
&request->i915->gt.idle_work,
msecs_to_jiffies(100));
}
unreserve_seqno(request->engine);
unreserve_engine(request->engine);
advance_ring(request);

free_capture_list(request);

@@ -370,8 +401,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_request_remove_from_client(request);

/* Retirement decays the ban score as it is a sign of ctx progress */
if (request->ctx->ban_score > 0)
request->ctx->ban_score--;
atomic_dec_if_positive(&request->ctx->ban_score);

/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until

@@ -384,7 +414,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = request->ctx;

dma_fence_signal(&request->fence);
spin_lock_irq(&request->lock);
if (request->waitboost)
atomic_dec(&request->i915->rps.num_waiters);
dma_fence_signal_locked(&request->fence);
spin_unlock_irq(&request->lock);

i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);

@@ -568,7 +602,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return ERR_CAST(ring);
GEM_BUG_ON(!ring);

ret = reserve_seqno(engine);
ret = reserve_engine(engine);
if (ret)
goto err_unpin;

@@ -639,6 +673,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->file_priv = NULL;
req->batch = NULL;
req->capture_list = NULL;
req->waitboost = false;

/*
* Reserve space in the ring buffer for all the commands required to

@@ -673,7 +708,7 @@ err_ctx:

kmem_cache_free(dev_priv->requests, req);
err_unreserve:
unreserve_seqno(engine);
unreserve_engine(engine);
err_unpin:
engine->context_unpin(engine, ctx);
return ERR_PTR(ret);

@@ -855,28 +890,6 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
return ret;
}

static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;

if (dev_priv->gt.awake)
return;

GEM_BUG_ON(!dev_priv->gt.active_requests);

intel_runtime_pm_get_noresume(dev_priv);
dev_priv->gt.awake = true;

intel_enable_gt_powersave(dev_priv);
i915_update_gfx_val(dev_priv);
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_busy(dev_priv);

queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
}

/*
* NB: This function is not allowed to fail. Doing so would mean the the
* request is not being tracked for completion but the work itself is

@@ -958,9 +971,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;

if (!request->i915->gt.active_requests++)
i915_gem_mark_busy(engine);

/* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
* request - i.e. we may want to preempt the current request in order

@@ -1063,7 +1073,7 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
return false;

__set_current_state(TASK_RUNNING);
i915_reset(request->i915);
i915_reset(request->i915, 0);
return true;
}
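
One way to read the ban_score change above, as a hedged sketch (assuming ban_score becomes an atomic_t elsewhere in this series, which the gpu_error.c hunks below suggest):

	/* Old pattern: the test and the decrement were two separate
	 * operations, so they were only safe under the big lock:
	 *
	 *	if (request->ctx->ban_score > 0)
	 *		request->ctx->ban_score--;
	 *
	 * Replacement: one atomic operation that never drops the score
	 * below zero, with no lock required:
	 *
	 *	atomic_dec_if_positive(&request->ctx->ban_score);
	 */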
@@ -184,6 +184,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;

bool waitboost;

/** engine->request_list entry for this request */
struct list_head link;
@@ -254,9 +254,10 @@ static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
ggtt->stolen_size - 1,
ggtt->stolen_size - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen

@@ -579,6 +580,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
unsigned int cache_level;

obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)

@@ -589,8 +591,8 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,

obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
obj->cache_coherent = true; /* assumptions! more like cache_oblivious */
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);

if (i915_gem_object_pin_pages(obj))
goto cleanup;
@@ -804,9 +804,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
i915_gem_object_init(obj, &i915_gem_userptr_ops);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_LLC;
obj->cache_coherent = i915_gem_object_is_coherent(obj);
obj->cache_dirty = !obj->cache_coherent;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

obj->userptr.ptr = args->user_ptr;
obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
@@ -463,6 +463,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
ee->hangcheck_timestamp,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);

error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);

@@ -1236,6 +1237,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);

if (USES_PPGTT(dev_priv)) {
int i;

@@ -1263,7 +1266,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
erq->ban_score = request->ctx->ban_score;
erq->ban_score = atomic_read(&request->ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;

@@ -1354,9 +1357,9 @@ static void record_context(struct drm_i915_error_context *e,

e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->ban_score = ctx->ban_score;
e->guilty = ctx->guilty_count;
e->active = ctx->active_count;
e->ban_score = atomic_read(&ctx->ban_score);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}

static void request_record_user_bo(struct drm_i915_gem_request *request,
@@ -275,17 +275,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
|
||||
|
||||
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
}
|
||||
|
||||
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
|
||||
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
|
||||
}
|
||||
|
||||
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
|
||||
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1091,18 +1091,6 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
|
||||
return events;
|
||||
}
|
||||
|
||||
static bool any_waiters(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
for_each_engine(engine, dev_priv, id)
|
||||
if (intel_engine_has_waiter(engine))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void gen6_pm_rps_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
@@ -1114,7 +1102,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->rps.interrupts_enabled) {
|
||||
pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
|
||||
client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
|
||||
client_boost = atomic_read(&dev_priv->rps.num_waiters);
|
||||
}
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
@@ -1131,7 +1119,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
||||
new_delay = dev_priv->rps.cur_freq;
|
||||
min = dev_priv->rps.min_freq_softlimit;
|
||||
max = dev_priv->rps.max_freq_softlimit;
|
||||
if (client_boost || any_waiters(dev_priv))
|
||||
if (client_boost)
|
||||
max = dev_priv->rps.max_freq;
|
||||
if (client_boost && new_delay < dev_priv->rps.boost_freq) {
|
||||
new_delay = dev_priv->rps.boost_freq;
|
||||
@@ -1144,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
||||
|
||||
if (new_delay >= dev_priv->rps.max_freq_softlimit)
|
||||
adj = 0;
|
||||
} else if (client_boost || any_waiters(dev_priv)) {
|
||||
} else if (client_boost) {
|
||||
adj = 0;
|
||||
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
|
||||
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
|
||||
@@ -1513,7 +1501,8 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
|
||||
|
||||
*pin_mask |= BIT(i);
|
||||
|
||||
if (!intel_hpd_pin_to_port(i, &port))
|
||||
port = intel_hpd_pin_to_port(i);
|
||||
if (port == PORT_NONE)
|
||||
continue;
|
||||
|
||||
if (long_pulse_detect(port, dig_hotplug_reg))
|
||||
@@ -1603,7 +1592,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
|
||||
crcs[3] = crc3;
|
||||
crcs[4] = crc4;
|
||||
drm_crtc_add_crc_entry(&crtc->base, true,
|
||||
drm_accurate_vblank_count(&crtc->base),
|
||||
drm_crtc_accurate_vblank_count(&crtc->base),
|
||||
crcs);
|
||||
}
|
||||
}
|
||||
@@ -1673,7 +1662,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 8)
|
||||
if (INTEL_GEN(dev_priv) >= 8)
|
||||
return;
|
||||
|
||||
if (HAS_VEBOX(dev_priv)) {
|
||||
@@ -1720,18 +1709,6 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
|
||||
}
|
||||
}
|
||||
|
||||
static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
ret = drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
if (ret)
|
||||
intel_finish_page_flip_mmio(dev_priv, pipe);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
|
||||
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
|
||||
{
|
||||
@@ -1796,12 +1773,8 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe;
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
|
||||
intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
|
||||
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
|
||||
@@ -2098,10 +2071,10 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
|
||||
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
|
||||
|
||||
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
|
||||
|
||||
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
|
||||
}
|
||||
|
||||
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
|
||||
@@ -2135,13 +2108,13 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
|
||||
DRM_ERROR("PCH poison interrupt\n");
|
||||
|
||||
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
|
||||
|
||||
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
|
||||
|
||||
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
|
||||
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
|
||||
|
||||
I915_WRITE(SERR_INT, serr_int);
|
||||
}
|
||||
@@ -2253,19 +2226,14 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
|
||||
DRM_ERROR("Poison interrupt\n");
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (de_iir & DE_PIPE_VBLANK(pipe) &&
|
||||
intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
if (de_iir & DE_PIPE_VBLANK(pipe))
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
|
||||
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
|
||||
|
||||
if (de_iir & DE_PIPE_CRC_DONE(pipe))
|
||||
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
|
||||
|
||||
/* plane/pipes map 1:1 on ilk+ */
|
||||
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
}
|
||||
|
||||
/* check event from PCH */
|
||||
@@ -2304,13 +2272,8 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
|
||||
intel_opregion_asle_intr(dev_priv);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
|
||||
intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
|
||||
/* plane/pipes map 1:1 on ilk+ */
|
||||
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
}
|
||||
|
||||
/* check event from PCH */
|
||||
@@ -2452,7 +2415,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
tmp_mask = GEN8_AUX_CHANNEL_A;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
tmp_mask |= GEN9_AUX_CHANNEL_B |
|
||||
GEN9_AUX_CHANNEL_C |
|
||||
GEN9_AUX_CHANNEL_D;
|
||||
@@ -2491,7 +2454,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
|
||||
}
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
u32 flip_done, fault_errors;
|
||||
u32 fault_errors;
|
||||
|
||||
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
|
||||
continue;
|
||||
@@ -2505,18 +2468,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
|
||||
ret = IRQ_HANDLED;
|
||||
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
|
||||
|
||||
if (iir & GEN8_PIPE_VBLANK &&
|
||||
intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
|
||||
flip_done = iir;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
|
||||
else
|
||||
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
|
||||
|
||||
if (flip_done)
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
if (iir & GEN8_PIPE_VBLANK)
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
|
||||
hsw_pipe_crc_irq_handler(dev_priv, pipe);
|
||||
@@ -2525,7 +2478,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
|
||||
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
|
||||
|
||||
fault_errors = iir;
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9)
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
else
|
||||
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
@@ -2599,86 +2552,93 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct wedge_me {
|
||||
struct delayed_work work;
|
||||
struct drm_i915_private *i915;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static void wedge_me(struct work_struct *work)
|
||||
{
|
||||
struct wedge_me *w = container_of(work, typeof(*w), work.work);
|
||||
|
||||
dev_err(w->i915->drm.dev,
|
||||
"%s timed out, cancelling all in-flight rendering.\n",
|
||||
w->name);
|
||||
i915_gem_set_wedged(w->i915);
|
||||
}
|
||||
|
||||
static void __init_wedge(struct wedge_me *w,
|
||||
struct drm_i915_private *i915,
|
||||
long timeout,
|
||||
const char *name)
|
||||
{
|
||||
w->i915 = i915;
|
||||
w->name = name;
|
||||
|
||||
INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
|
||||
schedule_delayed_work(&w->work, timeout);
|
||||
}
|
||||
|
||||
static void __fini_wedge(struct wedge_me *w)
|
||||
{
|
||||
cancel_delayed_work_sync(&w->work);
|
||||
destroy_delayed_work_on_stack(&w->work);
|
||||
w->i915 = NULL;
|
||||
}
|
||||
|
||||
#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
|
||||
for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
|
||||
(W)->i915; \
|
||||
__fini_wedge((W)))
|
||||
|
||||
/**
|
||||
* i915_reset_and_wakeup - do process context error handling work
|
||||
* i915_reset_device - do process context error handling work
|
||||
* @dev_priv: i915 device private
|
||||
*
|
||||
* Fire an error uevent so userspace can see that a hang or error
|
||||
* was detected.
|
||||
*/
|
||||
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
|
||||
static void i915_reset_device(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
|
||||
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
|
||||
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
|
||||
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
|
||||
struct wedge_me w;
|
||||
|
||||
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
|
||||
|
||||
DRM_DEBUG_DRIVER("resetting chip\n");
|
||||
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
|
||||
|
||||
intel_prepare_reset(dev_priv);
|
||||
/* Use a watchdog to ensure that our reset completes */
|
||||
i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
|
||||
intel_prepare_reset(dev_priv);
|
||||
|
||||
set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
|
||||
wake_up_all(&dev_priv->gpu_error.wait_queue);
|
||||
/* Signal that locked waiters should reset the GPU */
|
||||
set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
|
||||
wake_up_all(&dev_priv->gpu_error.wait_queue);
|
||||
|
||||
do {
|
||||
/*
|
||||
* All state reset _must_ be completed before we update the
|
||||
* reset counter, for otherwise waiters might miss the reset
|
||||
* pending state and not properly drop locks, resulting in
|
||||
* deadlocks with the reset work.
|
||||
/* Wait for anyone holding the lock to wakeup, without
|
||||
* blocking indefinitely on struct_mutex.
|
||||
*/
|
||||
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
|
||||
i915_reset(dev_priv);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
do {
|
||||
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
|
||||
i915_reset(dev_priv, 0);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
|
||||
I915_RESET_HANDOFF,
|
||||
TASK_UNINTERRUPTIBLE,
|
||||
1));
|
||||
|
||||
/* We need to wait for anyone holding the lock to wakeup */
|
||||
} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
|
||||
I915_RESET_HANDOFF,
|
||||
TASK_UNINTERRUPTIBLE,
|
||||
HZ));
|
||||
|
||||
intel_finish_reset(dev_priv);
|
||||
intel_finish_reset(dev_priv);
|
||||
}
|
||||
|
||||
if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
|
||||
kobject_uevent_env(kobj,
|
||||
KOBJ_CHANGE, reset_done_event);
|
||||
|
||||
/*
|
||||
* Note: The wake_up also serves as a memory barrier so that
|
||||
* waiters see the updated value of the dev_priv->gpu_error.
|
||||
*/
|
||||
clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
|
||||
wake_up_all(&dev_priv->gpu_error.reset_queue);
|
||||
}
|
||||
|
||||
static inline void
|
||||
i915_err_print_instdone(struct drm_i915_private *dev_priv,
|
||||
struct intel_instdone *instdone)
|
||||
{
|
||||
int slice;
|
||||
int subslice;
|
||||
|
||||
pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
|
||||
|
||||
if (INTEL_GEN(dev_priv) <= 3)
|
||||
return;
|
||||
|
||||
pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
|
||||
|
||||
if (INTEL_GEN(dev_priv) <= 6)
|
||||
return;
|
||||
|
||||
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
|
||||
pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
|
||||
slice, subslice, instdone->sampler[slice][subslice]);
|
||||
|
||||
for_each_instdone_slice_subslice(dev_priv, slice, subslice)
|
||||
pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
|
||||
slice, subslice, instdone->row[slice][subslice]);
|
||||
}
|
||||
|
||||
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
|
||||
@@ -2722,6 +2682,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
|
||||
u32 engine_mask,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
unsigned int tmp;
|
||||
va_list args;
|
||||
char error_msg[80];
|
||||
|
||||
@@ -2741,14 +2703,56 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
|
||||
i915_capture_error_state(dev_priv, engine_mask, error_msg);
|
||||
i915_clear_error_registers(dev_priv);
|
||||
|
||||
/*
|
||||
* Try engine reset when available. We fall back to full reset if
|
||||
* single reset fails.
|
||||
*/
|
||||
if (intel_has_reset_engine(dev_priv)) {
|
||||
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
|
||||
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
|
||||
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
|
||||
&dev_priv->gpu_error.flags))
|
||||
continue;
|
||||
|
||||
if (i915_reset_engine(engine, 0) == 0)
|
||||
engine_mask &= ~intel_engine_flag(engine);
|
||||
|
||||
clear_bit(I915_RESET_ENGINE + engine->id,
|
||||
&dev_priv->gpu_error.flags);
|
||||
wake_up_bit(&dev_priv->gpu_error.flags,
|
||||
I915_RESET_ENGINE + engine->id);
|
||||
}
|
||||
}
|
||||
|
||||
if (!engine_mask)
|
||||
goto out;
|
||||
|
||||
if (test_and_set_bit(I915_RESET_BACKOFF,
|
||||
&dev_priv->gpu_error.flags))
|
||||
/* Full reset needs the mutex, stop any other user trying to do so. */
|
||||
if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
|
||||
wait_event(dev_priv->gpu_error.reset_queue,
|
||||
!test_bit(I915_RESET_BACKOFF,
|
||||
&dev_priv->gpu_error.flags));
|
||||
goto out;
|
||||
}
|
||||
|
||||
i915_reset_and_wakeup(dev_priv);
|
||||
/* Prevent any other reset-engine attempt. */
|
||||
for_each_engine(engine, dev_priv, tmp) {
|
||||
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
|
||||
&dev_priv->gpu_error.flags))
|
||||
wait_on_bit(&dev_priv->gpu_error.flags,
|
||||
I915_RESET_ENGINE + engine->id,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
|
||||
i915_reset_device(dev_priv);
|
||||
|
||||
for_each_engine(engine, dev_priv, tmp) {
|
||||
clear_bit(I915_RESET_ENGINE + engine->id,
|
||||
&dev_priv->gpu_error.flags);
|
||||
}
|
||||
|
||||
clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
|
||||
wake_up_all(&dev_priv->gpu_error.reset_queue);
|
||||
|
||||
out:
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
@@ -3009,7 +3013,7 @@ static void gen8_irq_reset(struct drm_device *dev)
|
||||
}
|
||||
|
||||
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
unsigned int pipe_mask)
|
||||
u8 pipe_mask)
|
||||
{
|
||||
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
|
||||
enum pipe pipe;
|
||||
@@ -3023,7 +3027,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
unsigned int pipe_mask)
|
||||
u8 pipe_mask)
|
||||
{
|
||||
enum pipe pipe;
|
||||
|
||||
@@ -3427,7 +3431,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
u32 de_misc_masked = GEN8_DE_MISC_GSE;
|
||||
enum pipe pipe;
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9) {
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
|
||||
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
|
||||
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
|
||||
@@ -3610,34 +3614,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
|
||||
/*
|
||||
* Returns true when a page flip has completed.
|
||||
*/
|
||||
static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
|
||||
int plane, int pipe, u32 iir)
|
||||
{
|
||||
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
|
||||
if (!intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
return false;
|
||||
|
||||
if ((iir & flip_pending) == 0)
|
||||
goto check_page_flip;
|
||||
|
||||
/* We detect FlipDone by looking for the change in PendingFlip from '1'
|
||||
* to '0' on the following vblank, i.e. IIR has the Pendingflip
|
||||
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
|
||||
* the flip is completed (no longer pending). Since this doesn't raise
|
||||
* an interrupt per se, we watch for the change at vblank.
|
||||
*/
|
||||
if (I915_READ16(ISR) & flip_pending)
|
||||
goto check_page_flip;
|
||||
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
return true;
|
||||
|
||||
check_page_flip:
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
return false;
|
||||
}
|
||||
|
||||
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = arg;
|
||||
@@ -3645,9 +3621,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
u16 iir, new_iir;
|
||||
u32 pipe_stats[2];
|
||||
int pipe;
|
||||
u16 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
irqreturn_t ret;
|
||||
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
@@ -3661,7 +3634,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
if (iir == 0)
|
||||
goto out;
|
||||
|
||||
while (iir & ~flip_mask) {
|
||||
while (iir) {
|
||||
/* Can't rely on pipestat interrupt bit in iir as it might
|
||||
* have been cleared after the pipestat interrupt was received.
|
||||
* It doesn't set the bit in iir again, but it still produces
|
||||
@@ -3683,7 +3656,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
}
|
||||
spin_unlock(&dev_priv->irq_lock);
|
||||
|
||||
I915_WRITE16(IIR, iir & ~flip_mask);
|
||||
I915_WRITE16(IIR, iir);
|
||||
new_iir = I915_READ16(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
@@ -3694,9 +3667,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
if (HAS_FBC(dev_priv))
|
||||
plane = !plane;
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
|
||||
i8xx_handle_vblank(dev_priv, plane, pipe, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
|
||||
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
|
||||
@@ -3796,45 +3768,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true when a page flip has completed.
|
||||
*/
|
||||
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
|
||||
int plane, int pipe, u32 iir)
|
||||
{
|
||||
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
|
||||
if (!intel_pipe_handle_vblank(dev_priv, pipe))
|
||||
return false;
|
||||
|
||||
if ((iir & flip_pending) == 0)
|
||||
goto check_page_flip;
|
||||
|
||||
/* We detect FlipDone by looking for the change in PendingFlip from '1'
|
||||
* to '0' on the following vblank, i.e. IIR has the Pendingflip
|
||||
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
|
||||
* the flip is completed (no longer pending). Since this doesn't raise
|
||||
* an interrupt per se, we watch for the change at vblank.
|
||||
*/
|
||||
if (I915_READ(ISR) & flip_pending)
|
||||
goto check_page_flip;
|
||||
|
||||
intel_finish_page_flip_cs(dev_priv, pipe);
|
||||
return true;
|
||||
|
||||
check_page_flip:
|
||||
intel_check_page_flip(dev_priv, pipe);
|
||||
return false;
|
||||
}
|
||||
|
||||
static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = arg;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
int pipe, ret = IRQ_NONE;
|
||||
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
@@ -3845,7 +3783,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
|
||||
iir = I915_READ(IIR);
|
||||
do {
|
||||
bool irq_received = (iir & ~flip_mask) != 0;
|
||||
bool irq_received = (iir) != 0;
|
||||
bool blc_event = false;
|
||||
|
||||
/* Can't rely on pipestat interrupt bit in iir as it might
|
||||
@@ -3880,7 +3818,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
|
||||
}
|
||||
|
||||
I915_WRITE(IIR, iir & ~flip_mask);
|
||||
I915_WRITE(IIR, iir);
|
||||
new_iir = I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
@@ -3891,9 +3829,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
if (HAS_FBC(dev_priv))
|
||||
plane = !plane;
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
|
||||
i915_handle_vblank(dev_priv, plane, pipe, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
|
||||
blc_event = true;
|
||||
@@ -3926,7 +3863,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
*/
|
||||
ret = IRQ_HANDLED;
|
||||
iir = new_iir;
|
||||
} while (iir & ~flip_mask);
|
||||
} while (iir);
|
||||
|
||||
enable_rpm_wakeref_asserts(dev_priv);
|
||||
|
||||
@@ -4061,9 +3998,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
u32 iir, new_iir;
|
||||
u32 pipe_stats[I915_MAX_PIPES];
|
||||
int ret = IRQ_NONE, pipe;
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
|
||||
if (!intel_irqs_enabled(dev_priv))
|
||||
return IRQ_NONE;
|
||||
@@ -4074,7 +4008,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
iir = I915_READ(IIR);
|
||||
|
||||
for (;;) {
|
||||
bool irq_received = (iir & ~flip_mask) != 0;
|
||||
bool irq_received = (iir) != 0;
|
||||
bool blc_event = false;
|
||||
|
||||
/* Can't rely on pipestat interrupt bit in iir as it might
|
||||
@@ -4112,7 +4046,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
|
||||
}
|
||||
|
||||
I915_WRITE(IIR, iir & ~flip_mask);
|
||||
I915_WRITE(IIR, iir);
|
||||
new_iir = I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
@@ -4121,9 +4055,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
notify_ring(dev_priv->engine[VCS]);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
|
||||
i915_handle_vblank(dev_priv, pipe, pipe, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
|
||||
drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
|
||||
blc_event = true;
|
||||
@@ -4225,16 +4158,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
*
|
||||
* TODO: verify if this can be reproduced on VLV,CHV.
|
||||
*/
|
||||
if (INTEL_INFO(dev_priv)->gen <= 7)
|
||||
if (INTEL_GEN(dev_priv) <= 7)
|
||||
dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 8)
|
||||
if (INTEL_GEN(dev_priv) >= 8)
|
||||
dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
|
||||
|
||||
if (IS_GEN2(dev_priv)) {
|
||||
/* Gen2 doesn't have a hardware frame counter */
|
||||
dev->max_vblank_count = 0;
|
||||
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
|
||||
} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
|
||||
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
|
||||
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
|
||||
} else {
|
||||
@@ -4281,7 +4214,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
dev->driver->enable_vblank = i965_enable_vblank;
|
||||
dev->driver->disable_vblank = i965_disable_vblank;
|
||||
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
|
||||
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
|
||||
} else if (INTEL_GEN(dev_priv) >= 8) {
|
||||
dev->driver->irq_handler = gen8_irq_handler;
|
||||
dev->driver->irq_preinstall = gen8_irq_reset;
|
||||
dev->driver->irq_postinstall = gen8_irq_postinstall;
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__

extern int i915_oa_n_builtin_metric_sets_bdw;

extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);
extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);

#endif
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__

extern int i915_oa_n_builtin_metric_sets_bxt;

extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);
extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);

#endif
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__

extern int i915_oa_n_builtin_metric_sets_chv;

extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);
extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);

#endif
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__

extern int i915_oa_n_builtin_metric_sets_glk;

extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);
extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);

#endif
@@ -31,17 +31,6 @@
|
||||
#include "i915_drv.h"
|
||||
#include "i915_oa_hsw.h"
|
||||
|
||||
enum metric_set_id {
|
||||
METRIC_SET_ID_RENDER_BASIC = 1,
|
||||
METRIC_SET_ID_COMPUTE_BASIC,
|
||||
METRIC_SET_ID_COMPUTE_EXTENDED,
|
||||
METRIC_SET_ID_MEMORY_READS,
|
||||
METRIC_SET_ID_MEMORY_WRITES,
|
||||
METRIC_SET_ID_SAMPLER_BALANCE,
|
||||
};
|
||||
|
||||
int i915_oa_n_builtin_metric_sets_hsw = 6;
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_render_basic[] = {
|
||||
{ _MMIO(0x2724), 0x00800000 },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
@@ -53,6 +42,7 @@ static const struct i915_oa_reg flex_eu_config_render_basic[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_render_basic[] = {
|
||||
{ _MMIO(0x9840), 0x00000080 },
|
||||
{ _MMIO(0x253a4), 0x01600000 },
|
||||
{ _MMIO(0x25440), 0x00100000 },
|
||||
{ _MMIO(0x25128), 0x00000000 },
|
||||
@@ -114,750 +104,35 @@ static const struct i915_oa_reg mux_config_render_basic[] = {
|
||||
{ _MMIO(0x25428), 0x00042049 },
|
||||
};
|
||||
|
||||
static int
|
||||
get_render_basic_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_render_basic;
|
||||
lens[n] = ARRAY_SIZE(mux_config_render_basic);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_compute_basic[] = {
|
||||
{ _MMIO(0x2710), 0x00000000 },
|
||||
{ _MMIO(0x2714), 0x00800000 },
|
||||
{ _MMIO(0x2718), 0xaaaaaaaa },
|
||||
{ _MMIO(0x271c), 0xaaaaaaaa },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
{ _MMIO(0x2724), 0x00800000 },
|
||||
{ _MMIO(0x2728), 0xaaaaaaaa },
|
||||
{ _MMIO(0x272c), 0xaaaaaaaa },
|
||||
{ _MMIO(0x2740), 0x00000000 },
|
||||
{ _MMIO(0x2744), 0x00000000 },
|
||||
{ _MMIO(0x2748), 0x00000000 },
|
||||
{ _MMIO(0x274c), 0x00000000 },
|
||||
{ _MMIO(0x2750), 0x00000000 },
|
||||
{ _MMIO(0x2754), 0x00000000 },
|
||||
{ _MMIO(0x2758), 0x00000000 },
|
||||
{ _MMIO(0x275c), 0x00000000 },
|
||||
{ _MMIO(0x236c), 0x00000000 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_compute_basic[] = {
|
||||
{ _MMIO(0x253a4), 0x00000000 },
|
||||
{ _MMIO(0x2681c), 0x01f00800 },
|
||||
{ _MMIO(0x26820), 0x00001000 },
|
||||
{ _MMIO(0x2781c), 0x01f00800 },
|
||||
{ _MMIO(0x26520), 0x00000007 },
|
||||
{ _MMIO(0x265a0), 0x00000007 },
|
||||
{ _MMIO(0x25380), 0x00000010 },
|
||||
{ _MMIO(0x2538c), 0x00300000 },
|
||||
{ _MMIO(0x25384), 0xaa8aaaaa },
|
||||
{ _MMIO(0x25404), 0xffffffff },
|
||||
{ _MMIO(0x26800), 0x00004202 },
|
||||
{ _MMIO(0x26808), 0x00605817 },
|
||||
{ _MMIO(0x2680c), 0x10001005 },
|
||||
{ _MMIO(0x26804), 0x00000000 },
|
||||
{ _MMIO(0x27800), 0x00000102 },
|
||||
{ _MMIO(0x27808), 0x0c0701e0 },
|
||||
{ _MMIO(0x2780c), 0x000200a0 },
|
||||
{ _MMIO(0x27804), 0x00000000 },
|
||||
{ _MMIO(0x26484), 0x44000000 },
|
||||
{ _MMIO(0x26704), 0x44000000 },
|
||||
{ _MMIO(0x26500), 0x00000006 },
|
||||
{ _MMIO(0x26510), 0x00000001 },
|
||||
{ _MMIO(0x26504), 0x88000000 },
|
||||
{ _MMIO(0x26580), 0x00000006 },
|
||||
{ _MMIO(0x26590), 0x00000020 },
|
||||
{ _MMIO(0x26584), 0x00000000 },
|
||||
{ _MMIO(0x26104), 0x55822222 },
|
||||
{ _MMIO(0x26184), 0xaa866666 },
|
||||
{ _MMIO(0x25420), 0x08320c83 },
|
||||
{ _MMIO(0x25424), 0x06820c83 },
|
||||
{ _MMIO(0x2541c), 0x00000000 },
|
||||
{ _MMIO(0x25428), 0x00000c03 },
|
||||
};
|
||||
|
||||
static int
|
||||
get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_compute_basic;
|
||||
lens[n] = ARRAY_SIZE(mux_config_compute_basic);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_compute_extended[] = {
|
||||
{ _MMIO(0x2724), 0xf0800000 },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
{ _MMIO(0x2714), 0xf0800000 },
|
||||
{ _MMIO(0x2710), 0x00000000 },
|
||||
{ _MMIO(0x2770), 0x0007fe2a },
|
||||
{ _MMIO(0x2774), 0x0000ff00 },
|
||||
{ _MMIO(0x2778), 0x0007fe6a },
|
||||
{ _MMIO(0x277c), 0x0000ff00 },
|
||||
{ _MMIO(0x2780), 0x0007fe92 },
|
||||
{ _MMIO(0x2784), 0x0000ff00 },
|
||||
{ _MMIO(0x2788), 0x0007fea2 },
|
||||
{ _MMIO(0x278c), 0x0000ff00 },
|
||||
{ _MMIO(0x2790), 0x0007fe32 },
|
||||
{ _MMIO(0x2794), 0x0000ff00 },
|
||||
{ _MMIO(0x2798), 0x0007fe9a },
|
||||
{ _MMIO(0x279c), 0x0000ff00 },
|
||||
{ _MMIO(0x27a0), 0x0007ff23 },
|
||||
{ _MMIO(0x27a4), 0x0000ff00 },
|
||||
{ _MMIO(0x27a8), 0x0007fff3 },
|
||||
{ _MMIO(0x27ac), 0x0000fffe },
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_compute_extended[] = {
|
||||
{ _MMIO(0x2681c), 0x3eb00800 },
|
||||
{ _MMIO(0x26820), 0x00900000 },
|
||||
{ _MMIO(0x25384), 0x02aaaaaa },
|
||||
{ _MMIO(0x25404), 0x03ffffff },
|
||||
{ _MMIO(0x26800), 0x00142284 },
|
||||
{ _MMIO(0x26808), 0x0e629062 },
|
||||
{ _MMIO(0x2680c), 0x3f6f55cb },
|
||||
{ _MMIO(0x26810), 0x00000014 },
|
||||
{ _MMIO(0x26804), 0x00000000 },
|
||||
{ _MMIO(0x26104), 0x02aaaaaa },
|
||||
{ _MMIO(0x26184), 0x02aaaaaa },
|
||||
{ _MMIO(0x25420), 0x00000000 },
|
||||
{ _MMIO(0x25424), 0x00000000 },
|
||||
{ _MMIO(0x2541c), 0x00000000 },
|
||||
{ _MMIO(0x25428), 0x00000000 },
|
||||
};
|
||||
|
||||
static int
|
||||
get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_compute_extended;
|
||||
lens[n] = ARRAY_SIZE(mux_config_compute_extended);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_memory_reads[] = {
|
||||
{ _MMIO(0x2724), 0xf0800000 },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
{ _MMIO(0x2714), 0xf0800000 },
|
||||
{ _MMIO(0x2710), 0x00000000 },
|
||||
{ _MMIO(0x274c), 0x76543298 },
|
||||
{ _MMIO(0x2748), 0x98989898 },
|
||||
{ _MMIO(0x2744), 0x000000e4 },
|
||||
{ _MMIO(0x2740), 0x00000000 },
|
||||
{ _MMIO(0x275c), 0x98a98a98 },
|
||||
{ _MMIO(0x2758), 0x88888888 },
|
||||
{ _MMIO(0x2754), 0x000c5500 },
|
||||
{ _MMIO(0x2750), 0x00000000 },
|
||||
{ _MMIO(0x2770), 0x0007f81a },
|
||||
{ _MMIO(0x2774), 0x0000fc00 },
|
||||
{ _MMIO(0x2778), 0x0007f82a },
|
||||
{ _MMIO(0x277c), 0x0000fc00 },
|
||||
{ _MMIO(0x2780), 0x0007f872 },
|
||||
{ _MMIO(0x2784), 0x0000fc00 },
|
||||
{ _MMIO(0x2788), 0x0007f8ba },
|
||||
{ _MMIO(0x278c), 0x0000fc00 },
|
||||
{ _MMIO(0x2790), 0x0007f87a },
|
||||
{ _MMIO(0x2794), 0x0000fc00 },
|
||||
{ _MMIO(0x2798), 0x0007f8ea },
|
||||
{ _MMIO(0x279c), 0x0000fc00 },
|
||||
{ _MMIO(0x27a0), 0x0007f8e2 },
|
||||
{ _MMIO(0x27a4), 0x0000fc00 },
|
||||
{ _MMIO(0x27a8), 0x0007f8f2 },
|
||||
{ _MMIO(0x27ac), 0x0000fc00 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_memory_reads[] = {
|
||||
{ _MMIO(0x253a4), 0x34300000 },
|
||||
{ _MMIO(0x25440), 0x2d800000 },
|
||||
{ _MMIO(0x25444), 0x00000008 },
|
||||
{ _MMIO(0x25128), 0x0e600000 },
|
||||
{ _MMIO(0x25380), 0x00000450 },
|
||||
{ _MMIO(0x25390), 0x00052c43 },
|
||||
{ _MMIO(0x25384), 0x00000000 },
|
||||
{ _MMIO(0x25400), 0x00006144 },
|
||||
{ _MMIO(0x25408), 0x0a418820 },
|
||||
{ _MMIO(0x2540c), 0x000820e6 },
|
||||
{ _MMIO(0x25404), 0xff500000 },
|
||||
{ _MMIO(0x25100), 0x000005d6 },
|
||||
{ _MMIO(0x2510c), 0x0ef00000 },
|
||||
{ _MMIO(0x25104), 0x00000000 },
|
||||
{ _MMIO(0x25420), 0x02108421 },
|
||||
{ _MMIO(0x25424), 0x00008421 },
|
||||
{ _MMIO(0x2541c), 0x00000000 },
|
||||
{ _MMIO(0x25428), 0x00000000 },
|
||||
};
|
||||
|
||||
static int
|
||||
get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_memory_reads;
|
||||
lens[n] = ARRAY_SIZE(mux_config_memory_reads);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_memory_writes[] = {
|
||||
{ _MMIO(0x2724), 0xf0800000 },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
{ _MMIO(0x2714), 0xf0800000 },
|
||||
{ _MMIO(0x2710), 0x00000000 },
|
||||
{ _MMIO(0x274c), 0x76543298 },
|
||||
{ _MMIO(0x2748), 0x98989898 },
|
||||
{ _MMIO(0x2744), 0x000000e4 },
|
||||
{ _MMIO(0x2740), 0x00000000 },
|
||||
{ _MMIO(0x275c), 0xbabababa },
|
||||
{ _MMIO(0x2758), 0x88888888 },
|
||||
{ _MMIO(0x2754), 0x000c5500 },
|
||||
{ _MMIO(0x2750), 0x00000000 },
|
||||
{ _MMIO(0x2770), 0x0007f81a },
|
||||
{ _MMIO(0x2774), 0x0000fc00 },
|
||||
{ _MMIO(0x2778), 0x0007f82a },
|
||||
{ _MMIO(0x277c), 0x0000fc00 },
|
||||
{ _MMIO(0x2780), 0x0007f822 },
|
||||
{ _MMIO(0x2784), 0x0000fc00 },
|
||||
{ _MMIO(0x2788), 0x0007f8ba },
|
||||
{ _MMIO(0x278c), 0x0000fc00 },
|
||||
{ _MMIO(0x2790), 0x0007f87a },
|
||||
{ _MMIO(0x2794), 0x0000fc00 },
|
||||
{ _MMIO(0x2798), 0x0007f8ea },
|
||||
{ _MMIO(0x279c), 0x0000fc00 },
|
||||
{ _MMIO(0x27a0), 0x0007f8e2 },
|
||||
{ _MMIO(0x27a4), 0x0000fc00 },
|
||||
{ _MMIO(0x27a8), 0x0007f8f2 },
|
||||
{ _MMIO(0x27ac), 0x0000fc00 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_memory_writes[] = {
|
||||
{ _MMIO(0x253a4), 0x34300000 },
|
||||
{ _MMIO(0x25440), 0x01500000 },
|
||||
{ _MMIO(0x25444), 0x00000120 },
|
||||
{ _MMIO(0x25128), 0x0c200000 },
|
||||
{ _MMIO(0x25380), 0x00000450 },
|
||||
{ _MMIO(0x25390), 0x00052c43 },
|
||||
{ _MMIO(0x25384), 0x00000000 },
|
||||
{ _MMIO(0x25400), 0x00007184 },
|
||||
{ _MMIO(0x25408), 0x0a418820 },
|
||||
{ _MMIO(0x2540c), 0x000820e6 },
|
||||
{ _MMIO(0x25404), 0xff500000 },
|
||||
{ _MMIO(0x25100), 0x000005d6 },
|
||||
{ _MMIO(0x2510c), 0x1e700000 },
|
||||
{ _MMIO(0x25104), 0x00000000 },
|
||||
{ _MMIO(0x25420), 0x02108421 },
|
||||
{ _MMIO(0x25424), 0x00008421 },
|
||||
{ _MMIO(0x2541c), 0x00000000 },
|
||||
{ _MMIO(0x25428), 0x00000000 },
|
||||
};
|
||||
|
||||
static int
|
||||
get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_memory_writes;
|
||||
lens[n] = ARRAY_SIZE(mux_config_memory_writes);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
|
||||
{ _MMIO(0x2740), 0x00000000 },
|
||||
{ _MMIO(0x2744), 0x00800000 },
|
||||
{ _MMIO(0x2710), 0x00000000 },
|
||||
{ _MMIO(0x2714), 0x00800000 },
|
||||
{ _MMIO(0x2720), 0x00000000 },
|
||||
{ _MMIO(0x2724), 0x00800000 },
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg flex_eu_config_sampler_balance[] = {
|
||||
};
|
||||
|
||||
static const struct i915_oa_reg mux_config_sampler_balance[] = {
|
||||
{ _MMIO(0x2eb9c), 0x01906400 },
|
||||
{ _MMIO(0x2fb9c), 0x01906400 },
|
||||
{ _MMIO(0x253a4), 0x00000000 },
|
||||
{ _MMIO(0x26b9c), 0x01906400 },
|
||||
{ _MMIO(0x27b9c), 0x01906400 },
|
||||
{ _MMIO(0x27104), 0x00a00000 },
|
||||
{ _MMIO(0x27184), 0x00a50000 },
|
||||
{ _MMIO(0x2e804), 0x00500000 },
|
||||
{ _MMIO(0x2e984), 0x00500000 },
|
||||
{ _MMIO(0x2eb04), 0x00500000 },
|
||||
{ _MMIO(0x2eb80), 0x00000084 },
|
||||
{ _MMIO(0x2eb8c), 0x14200000 },
|
||||
{ _MMIO(0x2eb84), 0x00000000 },
|
||||
{ _MMIO(0x2f804), 0x00050000 },
|
||||
{ _MMIO(0x2f984), 0x00050000 },
|
||||
{ _MMIO(0x2fb04), 0x00050000 },
|
||||
{ _MMIO(0x2fb80), 0x00000084 },
|
||||
{ _MMIO(0x2fb8c), 0x00050800 },
|
||||
{ _MMIO(0x2fb84), 0x00000000 },
|
||||
{ _MMIO(0x25380), 0x00000010 },
|
||||
{ _MMIO(0x2538c), 0x000000c0 },
|
||||
{ _MMIO(0x25384), 0xaa550000 },
|
||||
{ _MMIO(0x25404), 0xffffc000 },
|
||||
{ _MMIO(0x26804), 0x50000000 },
|
||||
{ _MMIO(0x26984), 0x50000000 },
|
||||
{ _MMIO(0x26b04), 0x50000000 },
|
||||
{ _MMIO(0x26b80), 0x00000084 },
|
||||
{ _MMIO(0x26b90), 0x00050800 },
|
||||
{ _MMIO(0x26b84), 0x00000000 },
|
||||
{ _MMIO(0x27804), 0x05000000 },
|
||||
{ _MMIO(0x27984), 0x05000000 },
|
||||
{ _MMIO(0x27b04), 0x05000000 },
|
||||
{ _MMIO(0x27b80), 0x00000084 },
|
||||
{ _MMIO(0x27b90), 0x00000142 },
|
||||
{ _MMIO(0x27b84), 0x00000000 },
|
||||
{ _MMIO(0x26104), 0xa0000000 },
|
||||
{ _MMIO(0x26184), 0xa5000000 },
|
||||
{ _MMIO(0x25424), 0x00008620 },
|
||||
{ _MMIO(0x2541c), 0x00000000 },
|
||||
{ _MMIO(0x25428), 0x0004a54a },
|
||||
};
|
||||
|
||||
static int
|
||||
get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
|
||||
const struct i915_oa_reg **regs,
|
||||
int *lens)
|
||||
{
|
||||
int n = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
|
||||
|
||||
regs[n] = mux_config_sampler_balance;
|
||||
lens[n] = ARRAY_SIZE(mux_config_sampler_balance);
|
||||
n++;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->perf.oa.n_mux_configs = 0;
|
||||
dev_priv->perf.oa.b_counter_regs = NULL;
|
||||
dev_priv->perf.oa.b_counter_regs_len = 0;
|
||||
|
||||
switch (dev_priv->perf.oa.metrics_set) {
|
||||
case METRIC_SET_ID_RENDER_BASIC:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_render_basic_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_render_basic;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_render_basic);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_render_basic;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_render_basic);
|
||||
|
||||
return 0;
|
||||
case METRIC_SET_ID_COMPUTE_BASIC:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_compute_basic_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_compute_basic;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_compute_basic);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_compute_basic;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_compute_basic);
|
||||
|
||||
return 0;
|
||||
case METRIC_SET_ID_COMPUTE_EXTENDED:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_compute_extended_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_compute_extended;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_compute_extended);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_compute_extended;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_compute_extended);
|
||||
|
||||
return 0;
|
||||
case METRIC_SET_ID_MEMORY_READS:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_memory_reads_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_memory_reads;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_memory_reads);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_memory_reads;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_memory_reads);
|
||||
|
||||
return 0;
|
||||
case METRIC_SET_ID_MEMORY_WRITES:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_memory_writes_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_memory_writes;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_memory_writes);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_memory_writes;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_memory_writes);
|
||||
|
||||
return 0;
|
||||
case METRIC_SET_ID_SAMPLER_BALANCE:
|
||||
dev_priv->perf.oa.n_mux_configs =
|
||||
get_sampler_balance_mux_config(dev_priv,
|
||||
dev_priv->perf.oa.mux_regs,
|
||||
dev_priv->perf.oa.mux_regs_lens);
|
||||
if (dev_priv->perf.oa.n_mux_configs == 0) {
|
||||
DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set\n");
|
||||
|
||||
/* EINVAL because *_register_sysfs already checked this
|
||||
* and so it wouldn't have been advertised to userspace and
|
||||
* so shouldn't have been requested
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev_priv->perf.oa.b_counter_regs =
|
||||
b_counter_config_sampler_balance;
|
||||
dev_priv->perf.oa.b_counter_regs_len =
|
||||
ARRAY_SIZE(b_counter_config_sampler_balance);
|
||||
|
||||
dev_priv->perf.oa.flex_regs =
|
||||
flex_eu_config_sampler_balance;
|
||||
dev_priv->perf.oa.flex_regs_len =
|
||||
ARRAY_SIZE(flex_eu_config_sampler_balance);
|
||||
|
||||
return 0;
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_render_basic_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_render_basic_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_render_basic[] = {
|
||||
&dev_attr_render_basic_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_render_basic = {
|
||||
.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212",
|
||||
.attrs = attrs_render_basic,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_compute_basic_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_compute_basic_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_compute_basic[] = {
|
||||
&dev_attr_compute_basic_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_compute_basic = {
|
||||
.name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b",
|
||||
.attrs = attrs_compute_basic,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_compute_extended_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_compute_extended_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_compute_extended[] = {
|
||||
&dev_attr_compute_extended_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_compute_extended = {
|
||||
.name = "3865be28-6982-49fe-9494-e4d1b4795413",
|
||||
.attrs = attrs_compute_extended,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_memory_reads_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_memory_reads_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_memory_reads[] = {
|
||||
&dev_attr_memory_reads_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_memory_reads = {
|
||||
.name = "bb5ed49b-2497-4095-94f6-26ba294db88a",
|
||||
.attrs = attrs_memory_reads,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_memory_writes_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_memory_writes_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_memory_writes[] = {
|
||||
&dev_attr_memory_writes_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_memory_writes = {
|
||||
.name = "3358d639-9b5f-45ab-976d-9b08cbfc6240",
|
||||
.attrs = attrs_memory_writes,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_sampler_balance_id = {
|
||||
.attr = { .name = "id", .mode = 0444 },
|
||||
.show = show_sampler_balance_id,
|
||||
.store = NULL,
|
||||
};
|
||||
|
||||
static struct attribute *attrs_sampler_balance[] = {
|
||||
&dev_attr_sampler_balance_id.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group group_sampler_balance = {
|
||||
.name = "bc274488-b4b6-40c7-90da-b77d7ad16189",
|
||||
.attrs = attrs_sampler_balance,
|
||||
};
|
||||
|
||||
int
|
||||
i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
|
||||
int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
|
||||
int ret = 0;
|
||||
|
||||
if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
|
||||
if (ret)
|
||||
goto error_render_basic;
|
||||
}
|
||||
if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
|
||||
if (ret)
|
||||
goto error_compute_basic;
|
||||
}
|
||||
if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
|
||||
if (ret)
|
||||
goto error_compute_extended;
|
||||
}
|
||||
if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
|
||||
if (ret)
|
||||
goto error_memory_reads;
|
||||
}
|
||||
if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
|
||||
if (ret)
|
||||
goto error_memory_writes;
|
||||
}
|
||||
if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens)) {
|
||||
ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
|
||||
if (ret)
|
||||
goto error_sampler_balance;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_sampler_balance:
	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}
|
||||
|
||||
void
|
||||
i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
|
||||
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
|
||||
int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
|
||||
strncpy(dev_priv->perf.oa.test_config.uuid,
|
||||
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
|
||||
UUID_STRING_LEN);
|
||||
dev_priv->perf.oa.test_config.id = 1;
|
||||
|
||||
if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
|
||||
if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
|
||||
if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
|
||||
if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
|
||||
if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
|
||||
if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens))
|
||||
sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
|
||||
dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
|
||||
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
|
||||
|
||||
dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
|
||||
dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
|
||||
|
||||
dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
|
||||
dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
|
||||
|
||||
dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
|
||||
dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
|
||||
|
||||
dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
|
||||
|
||||
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
|
||||
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
|
||||
dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
|
||||
}
|
||||
|
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_HSW_H__
|
||||
#define __I915_OA_HSW_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_hsw;
|
||||
|
||||
extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_KBLGT2_H__
|
||||
#define __I915_OA_KBLGT2_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_kblgt2;
|
||||
|
||||
extern int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_KBLGT3_H__
|
||||
#define __I915_OA_KBLGT3_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_kblgt3;
|
||||
|
||||
extern int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_SKLGT2_H__
|
||||
#define __I915_OA_SKLGT2_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_sklgt2;
|
||||
|
||||
extern int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_SKLGT3_H__
|
||||
#define __I915_OA_SKLGT3_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_sklgt3;
|
||||
|
||||
extern int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
@@ -29,12 +29,6 @@
|
||||
#ifndef __I915_OA_SKLGT4_H__
|
||||
#define __I915_OA_SKLGT4_H__
|
||||
|
||||
extern int i915_oa_n_builtin_metric_sets_sklgt4;
|
||||
|
||||
extern int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern int i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv);
|
||||
|
||||
extern void i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv);
|
||||
extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
|
||||
|
||||
#endif
|
||||
|
@@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = {
|
||||
.prefault_disable = 0,
|
||||
.load_detect_test = 0,
|
||||
.force_reset_modeset_test = 0,
|
||||
.reset = true,
|
||||
.reset = 2,
|
||||
.error_capture = true,
|
||||
.invert_brightness = 0,
|
||||
.disable_display = 0,
|
||||
@@ -115,8 +115,12 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
|
||||
"Override/Ignore selection of SDVO panel mode in the VBT "
|
||||
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
|
||||
|
||||
module_param_named_unsafe(reset, i915.reset, bool, 0600);
|
||||
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
|
||||
module_param_named_unsafe(reset, i915.reset, int, 0600);
|
||||
MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
|
||||
|
||||
module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400);
|
||||
MODULE_PARM_DESC(vbt_firmware,
|
||||
"Load VBT from specified file under /lib/firmware");
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
||||
module_param_named(error_capture, i915.error_capture, bool, 0600);
|
||||
|
@@ -28,6 +28,7 @@
|
||||
#include <linux/cache.h> /* for __read_mostly */
|
||||
|
||||
#define I915_PARAMS_FOR_EACH(func) \
|
||||
func(char *, vbt_firmware); \
|
||||
func(int, modeset); \
|
||||
func(int, panel_ignore_lid); \
|
||||
func(int, semaphores); \
|
||||
@@ -51,6 +52,7 @@
|
||||
func(int, use_mmio_flip); \
|
||||
func(int, mmio_debug); \
|
||||
func(int, edp_vswing); \
|
||||
func(int, reset); \
|
||||
func(unsigned int, inject_load_failure); \
|
||||
/* leave bools at the end to not create holes */ \
|
||||
func(bool, alpha_support); \
|
||||
@@ -60,7 +62,6 @@
|
||||
func(bool, prefault_disable); \
|
||||
func(bool, load_detect_test); \
|
||||
func(bool, force_reset_modeset_test); \
|
||||
func(bool, reset); \
|
||||
func(bool, error_capture); \
|
||||
func(bool, disable_display); \
|
||||
func(bool, verbose_state_checks); \
|
||||
|
@@ -310,7 +310,8 @@ static const struct intel_device_info intel_haswell_info = {
|
||||
BDW_COLORS, \
|
||||
.has_logical_ring_contexts = 1, \
|
||||
.has_full_48bit_ppgtt = 1, \
|
||||
.has_64bit_reloc = 1
|
||||
.has_64bit_reloc = 1, \
|
||||
.has_reset_engine = 1
|
||||
|
||||
#define BDW_PLATFORM \
|
||||
BDW_FEATURES, \
|
||||
@@ -342,6 +343,7 @@ static const struct intel_device_info intel_cherryview_info = {
|
||||
.has_gmch_display = 1,
|
||||
.has_aliasing_ppgtt = 1,
|
||||
.has_full_ppgtt = 1,
|
||||
.has_reset_engine = 1,
|
||||
.display_mmio_offset = VLV_DISPLAY_BASE,
|
||||
GEN_CHV_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
@@ -387,6 +389,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
|
||||
.has_aliasing_ppgtt = 1, \
|
||||
.has_full_ppgtt = 1, \
|
||||
.has_full_48bit_ppgtt = 1, \
|
||||
.has_reset_engine = 1, \
|
||||
GEN_DEFAULT_PIPEOFFSETS, \
|
||||
IVB_CURSOR_OFFSETS, \
|
||||
BDW_COLORS
|
||||
@@ -395,6 +398,7 @@ static const struct intel_device_info intel_broxton_info = {
|
||||
GEN9_LP_FEATURES,
|
||||
.platform = INTEL_BROXTON,
|
||||
.ddb_size = 512,
|
||||
.has_reset_engine = false,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_geminilake_info = {
|
||||
@@ -446,6 +450,7 @@ static const struct intel_device_info intel_cannonlake_info = {
|
||||
.gen = 10,
|
||||
.ddb_size = 1024,
|
||||
.has_csr = 1,
|
||||
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
|
||||
};
|
||||
|
||||
/*
|
||||
|
File diff suppressed because it is too large
@@ -49,12 +49,18 @@ enum vgt_g2v_type {
|
||||
VGT_G2V_MAX,
|
||||
};
|
||||
|
||||
/*
|
||||
* VGT capabilities type
|
||||
*/
|
||||
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
|
||||
|
||||
struct vgt_if {
|
||||
u64 magic; /* VGT_MAGIC */
|
||||
u16 version_major;
|
||||
u16 version_minor;
|
||||
u32 vgt_id; /* ID of vGT instance */
|
||||
u32 rsv1[12]; /* pad to offset 0x40 */
|
||||
u32 vgt_caps; /* VGT capabilities */
|
||||
u32 rsv1[11]; /* pad to offset 0x40 */
|
||||
/*
|
||||
* Data structure to describe the ballooning info of resources.
|
||||
* Each VM can only have one portion of continuous area for now.
|
||||
|
@@ -25,6 +25,97 @@
|
||||
#ifndef _I915_REG_H_
|
||||
#define _I915_REG_H_
|
||||
|
||||
/**
 * DOC: The i915 register macro definition style guide
 *
 * Follow the style described here for new macros, and while changing existing
 * macros. Do **not** mass change existing definitions just to update the style.
 *
 * Layout
 * ''''''
 *
 * Keep helper macros near the top. For example, _PIPE() and friends.
 *
 * Prefix macros that generally should not be used outside of this file with
 * underscore '_'. For example, _PIPE() and friends, single instances of
 * registers that are defined solely for the use by function-like macros.
 *
 * Avoid using the underscore prefixed macros outside of this file. There are
 * exceptions, but keep them to a minimum.
 *
 * There are two basic types of register definitions: Single registers and
 * register groups. Register groups are registers which have two or more
 * instances, for example one per pipe, port, transcoder, etc. Register groups
 * should be defined using function-like macros.
 *
 * For single registers, define the register offset first, followed by register
 * contents.
 *
 * For register groups, define the register instance offsets first, prefixed
 * with underscore, followed by a function-like macro choosing the right
 * instance based on the parameter, followed by register contents.
 *
 * Define the register contents (i.e. bit and bit field macros) from most
 * significant to least significant bit. Indent the register content macros
 * using two extra spaces between ``#define`` and the macro name.
 *
 * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
 * contents so that they are already shifted in place, and can be directly
 * OR'd. For convenience, function-like macros may be used to define bit fields,
 * but do note that the macros may be needed to read as well as write the
 * register contents.
 *
 * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
 * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
 * to the name.
 *
 * Group the register and its contents together without blank lines, separate
 * from other registers and their contents with one blank line.
 *
 * Indent macro values from macro names using TABs. Align values vertically. Use
 * braces in macro values as needed to avoid unintended precedence after macro
 * substitution. Use spaces in macro values according to kernel coding
 * style. Use lower case in hexadecimal values.
 *
 * Naming
 * ''''''
 *
 * Try to name registers according to the specs. If the register name changes in
 * the specs from platform to another, stick to the original name.
 *
 * Try to re-use existing register macro definitions. Only add new macros for
 * new register offsets, or when the register contents have changed enough to
 * warrant a full redefinition.
 *
 * When a register macro changes for a new platform, prefix the new macro using
 * the platform acronym or generation. For example, ``SKL_`` or ``GEN8_``. The
 * prefix signifies the start platform/generation using the register.
 *
 * When a bit (field) macro changes or gets added for a new platform, while
 * retaining the existing register macro, add a platform acronym or generation
 * suffix to the name. For example, ``_SKL`` or ``_GEN8``.
 *
 * Examples
 * ''''''''
 *
 * (Note that the values in the example are indented using spaces instead of
 * TABs to avoid misalignment in generated documentation. Use TABs in the
 * definitions.)::
 *
 *  #define _FOO_A                      0xf000
 *  #define _FOO_B                      0xf001
 *  #define FOO(pipe)                   _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
 *  #define   FOO_ENABLE                (1 << 31)
 *  #define   FOO_MODE_MASK             (0xf << 16)
 *  #define   FOO_MODE_SHIFT            16
 *  #define   FOO_MODE_BAR              (0 << 16)
 *  #define   FOO_MODE_BAZ              (1 << 16)
 *  #define   FOO_MODE_QUX_SNB          (2 << 16)
 *
 *  #define BAR                         _MMIO(0xb000)
 *  #define GEN8_BAR                    _MMIO(0xb888)
 */

typedef struct {
	uint32_t reg;
} i915_reg_t;
|
||||
@@ -229,6 +320,28 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define GEN8_RPCS_EU_MIN_SHIFT 0
|
||||
#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
|
||||
|
||||
#define WAIT_FOR_RC6_EXIT		_MMIO(0x20CC)
/* HSW only */
#define   HSW_SELECTIVE_READ_ADDRESSING_SHIFT	2
#define   HSW_SELECTIVE_READ_ADDRESSING_MASK	(0x3 << HSW_SELECTIVE_READ_ADDRESSING_SHIFT)
#define   HSW_SELECTIVE_WRITE_ADDRESS_SHIFT	4
#define   HSW_SELECTIVE_WRITE_ADDRESS_MASK	(0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
/* HSW+ */
#define   HSW_WAIT_FOR_RC6_EXIT_ENABLE		(1 << 0)
#define   HSW_RCS_CONTEXT_ENABLE		(1 << 7)
#define   HSW_RCS_INHIBIT			(1 << 8)
/* Gen8 */
#define   GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT	4
#define   GEN8_SELECTIVE_WRITE_ADDRESS_MASK	(0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
#define   GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE	(1 << 6)
#define   GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT	9
#define   GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK	(0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
#define   GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT	11
#define   GEN8_SELECTIVE_READ_SLICE_SELECT_MASK		(0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
#define   GEN8_SELECTIVE_READ_ADDRESSING_ENABLE		(1 << 13)

#define GAM_ECOCHK			_MMIO(0x4090)
#define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
#define   ECOCHK_SNB_BIT		(1<<10)
|
||||
@@ -729,119 +842,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define EU_PERF_CNTL5 _MMIO(0xe55c)
|
||||
#define EU_PERF_CNTL6 _MMIO(0xe65c)
|
||||
|
||||
#define GDT_CHICKEN_BITS _MMIO(0x9840)
|
||||
#define GT_NOA_ENABLE 0x00000080
|
||||
|
||||
/*
|
||||
* OA Boolean state
|
||||
*/
|
||||
|
||||
#define OAREPORTTRIG1 _MMIO(0x2740)
|
||||
#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
|
||||
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
|
||||
|
||||
#define OAREPORTTRIG2 _MMIO(0x2744)
|
||||
#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
|
||||
#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
|
||||
#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
|
||||
#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
|
||||
#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
|
||||
#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
|
||||
#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
|
||||
#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
|
||||
#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
|
||||
#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
|
||||
#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
|
||||
#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
|
||||
#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
|
||||
#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
|
||||
#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
|
||||
#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
|
||||
#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
|
||||
#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
|
||||
#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
|
||||
#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
|
||||
#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
|
||||
#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
|
||||
#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
|
||||
#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
|
||||
#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
|
||||
|
||||
#define OAREPORTTRIG3 _MMIO(0x2748)
|
||||
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
|
||||
#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
|
||||
#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
|
||||
#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
|
||||
#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
|
||||
#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
|
||||
#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
|
||||
#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG4 _MMIO(0x274c)
|
||||
#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
|
||||
#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
|
||||
#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
|
||||
#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
|
||||
#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
|
||||
#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
|
||||
#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
|
||||
#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG5 _MMIO(0x2750)
|
||||
#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
|
||||
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
|
||||
|
||||
#define OAREPORTTRIG6 _MMIO(0x2754)
|
||||
#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
|
||||
#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
|
||||
#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
|
||||
#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
|
||||
#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
|
||||
#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
|
||||
#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
|
||||
#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
|
||||
#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
|
||||
#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
|
||||
#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
|
||||
#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
|
||||
#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
|
||||
#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
|
||||
#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
|
||||
#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
|
||||
#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
|
||||
#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
|
||||
#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
|
||||
#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
|
||||
#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
|
||||
#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
|
||||
#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
|
||||
#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
|
||||
#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
|
||||
|
||||
#define OAREPORTTRIG7 _MMIO(0x2758)
|
||||
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
|
||||
#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
|
||||
#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
|
||||
#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
|
||||
#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
|
||||
#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
|
||||
#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
|
||||
#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG8 _MMIO(0x275c)
|
||||
#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
|
||||
#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
|
||||
#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
|
||||
#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
|
||||
#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
|
||||
#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
|
||||
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
|
||||
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
|
||||
|
||||
#define OASTARTTRIG1 _MMIO(0x2710)
|
||||
#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
|
||||
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
|
||||
@@ -956,6 +960,112 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24
|
||||
#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG1 _MMIO(0x2740)
|
||||
#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
|
||||
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
|
||||
|
||||
#define OAREPORTTRIG2 _MMIO(0x2744)
|
||||
#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
|
||||
#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
|
||||
#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
|
||||
#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
|
||||
#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
|
||||
#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
|
||||
#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
|
||||
#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
|
||||
#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
|
||||
#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
|
||||
#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
|
||||
#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
|
||||
#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
|
||||
#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
|
||||
#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
|
||||
#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
|
||||
#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
|
||||
#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
|
||||
#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
|
||||
#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
|
||||
#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
|
||||
#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
|
||||
#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
|
||||
#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
|
||||
#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
|
||||
|
||||
#define OAREPORTTRIG3 _MMIO(0x2748)
|
||||
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
|
||||
#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
|
||||
#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
|
||||
#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
|
||||
#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
|
||||
#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
|
||||
#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
|
||||
#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG4 _MMIO(0x274c)
|
||||
#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
|
||||
#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
|
||||
#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
|
||||
#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
|
||||
#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
|
||||
#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
|
||||
#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
|
||||
#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG5 _MMIO(0x2750)
|
||||
#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
|
||||
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
|
||||
|
||||
#define OAREPORTTRIG6 _MMIO(0x2754)
|
||||
#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
|
||||
#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
|
||||
#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
|
||||
#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
|
||||
#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
|
||||
#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
|
||||
#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
|
||||
#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
|
||||
#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
|
||||
#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
|
||||
#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
|
||||
#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
|
||||
#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
|
||||
#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
|
||||
#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
|
||||
#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
|
||||
#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
|
||||
#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
|
||||
#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
|
||||
#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
|
||||
#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
|
||||
#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
|
||||
#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
|
||||
#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
|
||||
#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
|
||||
|
||||
#define OAREPORTTRIG7 _MMIO(0x2758)
|
||||
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
|
||||
#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
|
||||
#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
|
||||
#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
|
||||
#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
|
||||
#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
|
||||
#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
|
||||
#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
|
||||
|
||||
#define OAREPORTTRIG8 _MMIO(0x275c)
|
||||
#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
|
||||
#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
|
||||
#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
|
||||
#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
|
||||
#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
|
||||
#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
|
||||
#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
|
||||
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
|
||||
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
|
||||
|
||||
/* CECX_0 */
|
||||
#define OACEC_COMPARE_LESS_OR_EQUAL 6
|
||||
#define OACEC_COMPARE_NOT_EQUAL 5
|
||||
@@ -994,6 +1104,51 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define OACEC7_0 _MMIO(0x27a8)
|
||||
#define OACEC7_1 _MMIO(0x27ac)
|
||||
|
||||
/* OA perf counters */
|
||||
#define OA_PERFCNT1_LO _MMIO(0x91B8)
|
||||
#define OA_PERFCNT1_HI _MMIO(0x91BC)
|
||||
#define OA_PERFCNT2_LO _MMIO(0x91C0)
|
||||
#define OA_PERFCNT2_HI _MMIO(0x91C4)
|
||||
|
||||
#define OA_PERFMATRIX_LO _MMIO(0x91C8)
|
||||
#define OA_PERFMATRIX_HI _MMIO(0x91CC)
|
||||
|
||||
/* RPM unit config (Gen8+) */
|
||||
#define RPM_CONFIG0 _MMIO(0x0D00)
|
||||
#define RPM_CONFIG1 _MMIO(0x0D04)
|
||||
|
||||
/* RPC unit config (Gen8+) */
|
||||
#define RPM_CONFIG _MMIO(0x0D08)
|
||||
|
||||
/* NOA (Gen8+) */
|
||||
#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4)
|
||||
|
||||
#define MICRO_BP0_0 _MMIO(0x9800)
|
||||
#define MICRO_BP0_2 _MMIO(0x9804)
|
||||
#define MICRO_BP0_1 _MMIO(0x9808)
|
||||
|
||||
#define MICRO_BP1_0 _MMIO(0x980C)
|
||||
#define MICRO_BP1_2 _MMIO(0x9810)
|
||||
#define MICRO_BP1_1 _MMIO(0x9814)
|
||||
|
||||
#define MICRO_BP2_0 _MMIO(0x9818)
|
||||
#define MICRO_BP2_2 _MMIO(0x981C)
|
||||
#define MICRO_BP2_1 _MMIO(0x9820)
|
||||
|
||||
#define MICRO_BP3_0 _MMIO(0x9824)
|
||||
#define MICRO_BP3_2 _MMIO(0x9828)
|
||||
#define MICRO_BP3_1 _MMIO(0x982C)
|
||||
|
||||
#define MICRO_BP_TRIGGER _MMIO(0x9830)
|
||||
#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
|
||||
#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
|
||||
#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
|
||||
|
||||
#define GDT_CHICKEN_BITS _MMIO(0x9840)
|
||||
#define GT_NOA_ENABLE 0x00000080
|
||||
|
||||
#define NOA_DATA _MMIO(0x986C)
|
||||
#define NOA_WRITE _MMIO(0x9888)
|
||||
|
||||
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
|
||||
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
|
||||
@@ -1063,9 +1218,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
||||
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
|
||||
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
|
||||
|
||||
/* See the PUNIT HAS v0.8 for the below bits */
|
||||
enum punit_power_well {
|
||||
/* These numbers are fixed and must match the position of the pw bits */
|
||||
/*
|
||||
* i915_power_well_id:
|
||||
*
|
||||
* Platform specific IDs used to look up power wells and - except for custom
|
||||
* power wells - to define request/status register flag bit positions. As such
|
||||
* the set of IDs on a given platform must be unique and except for custom
|
||||
* power wells their value must stay fixed.
|
||||
*/
|
||||
enum i915_power_well_id {
|
||||
/*
|
||||
* I830
|
||||
* - custom power well
|
||||
*/
|
||||
I830_DISP_PW_PIPES = 0,
|
||||
|
||||
/*
|
||||
* VLV/CHV
|
||||
* - PUNIT_REG_PWRGT_CTRL (bit: id*2),
|
||||
* PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8)
|
||||
*/
|
||||
PUNIT_POWER_WELL_RENDER = 0,
|
||||
PUNIT_POWER_WELL_MEDIA = 1,
|
||||
PUNIT_POWER_WELL_DISP2D = 3,
|
||||
@@ -1077,14 +1249,20 @@ enum punit_power_well {
|
||||
PUNIT_POWER_WELL_DPIO_RX0 = 10,
|
||||
PUNIT_POWER_WELL_DPIO_RX1 = 11,
|
||||
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
|
||||
/* - custom power well */
|
||||
CHV_DISP_PW_PIPE_A, /* 13 */
|
||||
|
||||
/* Not actual bit groups. Used as IDs for lookup_power_well() */
|
||||
PUNIT_POWER_WELL_ALWAYS_ON,
|
||||
};
|
||||
/*
|
||||
* HSW/BDW
|
||||
* - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
|
||||
*/
|
||||
HSW_DISP_PW_GLOBAL = 15,
|
||||
|
||||
enum skl_disp_power_wells {
|
||||
/* These numbers are fixed and must match the position of the pw bits */
|
||||
SKL_DISP_PW_MISC_IO,
|
||||
/*
|
||||
* GEN9+
|
||||
* - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
|
||||
*/
|
||||
SKL_DISP_PW_MISC_IO = 0,
|
||||
SKL_DISP_PW_DDI_A_E,
|
||||
GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
|
||||
CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
|
||||
@@ -1103,17 +1281,19 @@ enum skl_disp_power_wells {
|
||||
SKL_DISP_PW_1 = 14,
|
||||
SKL_DISP_PW_2,
|
||||
|
||||
/* Not actual bit groups. Used as IDs for lookup_power_well() */
|
||||
SKL_DISP_PW_ALWAYS_ON,
|
||||
/* - custom power wells */
|
||||
SKL_DISP_PW_DC_OFF,
|
||||
|
||||
BXT_DPIO_CMN_A,
|
||||
BXT_DPIO_CMN_BC,
|
||||
GLK_DPIO_CMN_C,
|
||||
};
|
||||
GLK_DPIO_CMN_C, /* 19 */
|
||||
|
||||
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
|
||||
#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
|
||||
/*
|
||||
* Multiple platforms.
|
||||
* Must start following the highest ID of any platform.
|
||||
* - custom power wells
|
||||
*/
|
||||
I915_DISP_PW_ALWAYS_ON = 20,
|
||||
};
|
||||
|
||||
#define PUNIT_REG_PWRGT_CTRL 0x60
|
||||
#define PUNIT_REG_PWRGT_STATUS 0x61
|
||||
@@ -2156,6 +2336,7 @@ enum skl_disp_power_wells {
|
||||
#define DONE_REG _MMIO(0x40b0)
|
||||
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
|
||||
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
|
||||
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + index*4)
|
||||
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
|
||||
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
|
||||
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
|
||||
@@ -3522,7 +3703,7 @@ enum skl_disp_power_wells {
|
||||
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
|
||||
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
|
||||
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
|
||||
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
|
||||
#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
|
||||
(IS_GEN9_LP(dev_priv) ? \
|
||||
INTERVAL_0_833_US(us) : \
|
||||
INTERVAL_1_33_US(us)) : \
|
||||
@@ -3531,7 +3712,7 @@ enum skl_disp_power_wells {
|
||||
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
|
||||
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
|
||||
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
|
||||
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
|
||||
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
|
||||
(IS_GEN9_LP(dev_priv) ? \
|
||||
INTERVAL_0_833_TO_US(interval) : \
|
||||
INTERVAL_1_33_TO_US(interval)) : \
|
||||
@@ -3783,6 +3964,7 @@ enum {
|
||||
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
|
||||
#define EDP_PSR_ENABLE (1<<31)
|
||||
#define BDW_PSR_SINGLE_FRAME (1<<30)
|
||||
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
|
||||
#define EDP_PSR_LINK_STANDBY (1<<27)
|
||||
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
|
||||
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
|
||||
@@ -5227,6 +5409,9 @@ enum {
|
||||
|
||||
#define _PIPE_MISC_A 0x70030
|
||||
#define _PIPE_MISC_B 0x71030
|
||||
#define PIPEMISC_YUV420_ENABLE (1<<27)
|
||||
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
|
||||
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
|
||||
#define PIPEMISC_DITHER_BPC_MASK (7<<5)
|
||||
#define PIPEMISC_DITHER_8_BPC (0<<5)
|
||||
#define PIPEMISC_DITHER_10_BPC (1<<5)
|
||||
@@ -6106,6 +6291,10 @@ enum {
|
||||
#define _PLANE_KEYMSK_2_A 0x70298
|
||||
#define _PLANE_KEYMAX_1_A 0x701a0
|
||||
#define _PLANE_KEYMAX_2_A 0x702a0
|
||||
#define _PLANE_AUX_DIST_1_A 0x701c0
|
||||
#define _PLANE_AUX_DIST_2_A 0x702c0
|
||||
#define _PLANE_AUX_OFFSET_1_A 0x701c4
|
||||
#define _PLANE_AUX_OFFSET_2_A 0x702c4
|
||||
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
|
||||
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
|
||||
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
|
||||
@@ -6212,6 +6401,24 @@ enum {
|
||||
#define PLANE_NV12_BUF_CFG(pipe, plane) \
|
||||
_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
|
||||
|
||||
#define _PLANE_AUX_DIST_1_B 0x711c0
|
||||
#define _PLANE_AUX_DIST_2_B 0x712c0
|
||||
#define _PLANE_AUX_DIST_1(pipe) \
|
||||
_PIPE(pipe, _PLANE_AUX_DIST_1_A, _PLANE_AUX_DIST_1_B)
|
||||
#define _PLANE_AUX_DIST_2(pipe) \
|
||||
_PIPE(pipe, _PLANE_AUX_DIST_2_A, _PLANE_AUX_DIST_2_B)
|
||||
#define PLANE_AUX_DIST(pipe, plane) \
|
||||
_MMIO_PLANE(plane, _PLANE_AUX_DIST_1(pipe), _PLANE_AUX_DIST_2(pipe))
|
||||
|
||||
#define _PLANE_AUX_OFFSET_1_B 0x711c4
|
||||
#define _PLANE_AUX_OFFSET_2_B 0x712c4
|
||||
#define _PLANE_AUX_OFFSET_1(pipe) \
|
||||
_PIPE(pipe, _PLANE_AUX_OFFSET_1_A, _PLANE_AUX_OFFSET_1_B)
|
||||
#define _PLANE_AUX_OFFSET_2(pipe) \
|
||||
_PIPE(pipe, _PLANE_AUX_OFFSET_2_A, _PLANE_AUX_OFFSET_2_B)
|
||||
#define PLANE_AUX_OFFSET(pipe, plane) \
|
||||
_MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
|
||||
|
||||
#define _PLANE_COLOR_CTL_1_B 0x711CC
|
||||
#define _PLANE_COLOR_CTL_2_B 0x712CC
|
||||
#define _PLANE_COLOR_CTL_3_B 0x713CC
|
||||
@@ -6695,6 +6902,7 @@ enum {
|
||||
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
|
||||
|
||||
#define CHICKEN_PAR1_1 _MMIO(0x42080)
|
||||
#define SKL_RC_HASH_OUTSIDE (1 << 15)
|
||||
#define DPA_MASK_VBLANK_SRD (1 << 15)
|
||||
#define FORCE_ARB_IDLE_PLANES (1 << 14)
|
||||
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
|
||||
@@ -6703,12 +6911,10 @@ enum {
|
||||
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
|
||||
|
||||
#define CHICKEN_MISC_2 _MMIO(0x42084)
|
||||
#define GLK_CL0_PWR_DOWN (1 << 10)
|
||||
#define GLK_CL1_PWR_DOWN (1 << 11)
|
||||
#define CNL_COMP_PWR_DOWN (1 << 23)
|
||||
#define GLK_CL2_PWR_DOWN (1 << 12)
|
||||
|
||||
#define CHICKEN_MISC_2 _MMIO(0x42084)
|
||||
#define COMP_PWR_DOWN (1 << 23)
|
||||
#define GLK_CL1_PWR_DOWN (1 << 11)
|
||||
#define GLK_CL0_PWR_DOWN (1 << 10)
|
||||
|
||||
#define _CHICKEN_PIPESL_1_A 0x420b0
|
||||
#define _CHICKEN_PIPESL_1_B 0x420b4
|
||||
@@ -7984,12 +8190,31 @@ enum {
|
||||
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
|
||||
|
||||
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS			_MMIO(0x45400) /* CTL1 */
#define HSW_PWR_WELL_DRIVER			_MMIO(0x45404) /* CTL2 */
#define HSW_PWR_WELL_KVMR			_MMIO(0x45408) /* CTL3 */
#define HSW_PWR_WELL_DEBUG			_MMIO(0x4540C) /* CTL4 */
#define   HSW_PWR_WELL_ENABLE_REQUEST		(1<<31)
#define   HSW_PWR_WELL_STATE_ENABLED		(1<<30)
#define _HSW_PWR_WELL_CTL1			0x45400
#define _HSW_PWR_WELL_CTL2			0x45404
#define _HSW_PWR_WELL_CTL3			0x45408
#define _HSW_PWR_WELL_CTL4			0x4540C

/*
 * Each power well control register contains up to 16 (request, status) HW
 * flag tuples. The register index and HW flag shift is determined by the
 * power well ID (see i915_power_well_id). There are 4 possible sources of
 * power well requests each source having its own set of control registers:
 * BIOS, DRIVER, KVMR, DEBUG.
 */
#define _HSW_PW_REG_IDX(pw)			((pw) >> 4)
#define _HSW_PW_SHIFT(pw)			(((pw) & 0xf) * 2)
/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
#define HSW_PWR_WELL_CTL_BIOS(pw)	_MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
						    _HSW_PWR_WELL_CTL1))
#define HSW_PWR_WELL_CTL_DRIVER(pw)	_MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
						    _HSW_PWR_WELL_CTL2))
#define HSW_PWR_WELL_CTL_KVMR		_MMIO(_HSW_PWR_WELL_CTL3)
#define HSW_PWR_WELL_CTL_DEBUG(pw)	_MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
						    _HSW_PWR_WELL_CTL4))

#define   HSW_PWR_WELL_CTL_REQ(pw)		(1 << (_HSW_PW_SHIFT(pw) + 1))
#define   HSW_PWR_WELL_CTL_STATE(pw)		(1 << _HSW_PW_SHIFT(pw))
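/*
 * Illustrative example of the mapping: for HSW_DISP_PW_GLOBAL (= 15),
 * _HSW_PW_REG_IDX(15) is 0, so the BIOS/DRIVER/KVMR/DEBUG lookups resolve to
 * the CTL1..CTL4 offsets above, and _HSW_PW_SHIFT(15) is 30, giving
 * HSW_PWR_WELL_CTL_STATE(15) = (1 << 30) and HSW_PWR_WELL_CTL_REQ(15) =
 * (1 << 31), the same bits as the older HSW_PWR_WELL_STATE_ENABLED and
 * HSW_PWR_WELL_ENABLE_REQUEST definitions.
 */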
|
||||
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
|
||||
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
|
||||
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
|
||||
@@ -7997,11 +8222,17 @@ enum {
|
||||
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
|
||||
|
||||
/* SKL Fuse Status */
enum skl_power_gate {
	SKL_PG0,
	SKL_PG1,
	SKL_PG2,
};

#define SKL_FUSE_STATUS				_MMIO(0x42000)
#define   SKL_FUSE_DOWNLOAD_STATUS		(1<<31)
#define   SKL_FUSE_PG0_DIST_STATUS		(1<<27)
#define   SKL_FUSE_PG1_DIST_STATUS		(1<<26)
#define   SKL_FUSE_PG2_DIST_STATUS		(1<<25)
#define   SKL_FUSE_DOWNLOAD_STATUS		(1<<31)
/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
#define   SKL_PW_TO_PG(pw)			((pw) - SKL_DISP_PW_1 + SKL_PG1)
#define   SKL_FUSE_PG_DIST_STATUS(pg)		(1 << (27 - (pg)))
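/*
 * Illustrative example: SKL_PW_TO_PG(SKL_DISP_PW_1) evaluates to SKL_PG1, and
 * SKL_FUSE_PG_DIST_STATUS(SKL_PG1) = (1 << 26), matching the fixed
 * SKL_FUSE_PG1_DIST_STATUS bit above; SKL_PG2 likewise maps to (1 << 25).
 */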
|
||||
|
||||
/* Per-pipe DDI Function Control */
|
||||
#define _TRANS_DDI_FUNC_CTL_A 0x60400
|
||||
@@ -8343,6 +8574,7 @@ enum {
|
||||
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
|
||||
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
|
||||
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
|
||||
#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
|
||||
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
|
||||
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
|
||||
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
|
||||
@@ -8350,6 +8582,7 @@ enum {
|
||||
#define _CNL_DPLL0_CFGCR1 0x6C004
|
||||
#define _CNL_DPLL1_CFGCR1 0x6C084
|
||||
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
|
||||
#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
|
||||
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
|
||||
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
|
||||
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
|
||||
|
@@ -101,6 +101,4 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
|
||||
#define igt_timeout(t, fmt, ...) \
|
||||
__igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
|
||||
|
||||
#define igt_can_mi_store_dword_imm(D) (INTEL_GEN(D) > 2)
|
||||
|
||||
#endif /* !__I915_SELFTEST_H__ */
|
||||
|
@@ -96,7 +96,7 @@ static struct attribute *rc6_attrs[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group rc6_attr_group = {
|
||||
static const struct attribute_group rc6_attr_group = {
|
||||
.name = power_group_name,
|
||||
.attrs = rc6_attrs
|
||||
};
|
||||
@@ -107,7 +107,7 @@ static struct attribute *rc6p_attrs[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group rc6p_attr_group = {
|
||||
static const struct attribute_group rc6p_attr_group = {
|
||||
.name = power_group_name,
|
||||
.attrs = rc6p_attrs
|
||||
};
|
||||
@@ -117,7 +117,7 @@ static struct attribute *media_rc6_attrs[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group media_rc6_attr_group = {
|
||||
static const struct attribute_group media_rc6_attr_group = {
|
||||
.name = power_group_name,
|
||||
.attrs = media_rc6_attrs
|
||||
};
|
||||
@@ -209,7 +209,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
|
||||
memcpy(*remap_info + (offset/4), buf, count);
|
||||
|
||||
/* NB: We defer the remapping until we switch to the context */
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link)
|
||||
list_for_each_entry(ctx, &dev_priv->contexts.list, link)
|
||||
ctx->remap_slice |= (1<<slice);
|
||||
|
||||
ret = count;
|
||||
@@ -220,7 +220,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct bin_attribute dpf_attrs = {
|
||||
static const struct bin_attribute dpf_attrs = {
|
||||
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
|
||||
.size = GEN7_L3LOG_SIZE,
|
||||
.read = i915_l3_read,
|
||||
@@ -229,7 +229,7 @@ static struct bin_attribute dpf_attrs = {
|
||||
.private = (void *)0
|
||||
};
|
||||
|
||||
static struct bin_attribute dpf_attrs_1 = {
|
||||
static const struct bin_attribute dpf_attrs_1 = {
|
||||
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
|
||||
.size = GEN7_L3LOG_SIZE,
|
||||
.read = i915_l3_read,
|
||||
@@ -253,7 +253,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
|
||||
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
|
||||
} else {
|
||||
u32 rpstat = I915_READ(GEN6_RPSTAT1);
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
|
||||
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
|
||||
@@ -532,7 +532,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct bin_attribute error_state_attr = {
|
||||
static const struct bin_attribute error_state_attr = {
|
||||
.attr.name = "error",
|
||||
.attr.mode = S_IRUSR | S_IWUSR,
|
||||
.size = 0,
|
||||
|
@@ -75,10 +75,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
|
||||
return;
|
||||
}
|
||||
|
||||
dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
|
||||
|
||||
dev_priv->vgpu.active = true;
|
||||
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
|
||||
}
|
||||
|
||||
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
|
||||
}
|
||||
|
||||
struct _balloon_info_ {
|
||||
/*
|
||||
* There are up to 2 regions per mappable/unmappable graphic
|
||||
|
@@ -27,6 +27,9 @@
|
||||
#include "i915_pvinfo.h"
|
||||
|
||||
void i915_check_vgpu(struct drm_i915_private *dev_priv);
|
||||
|
||||
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
|
||||
|
||||
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
|
||||
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
@@ -579,11 +579,17 @@ err_unpin:

static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -591,33 +597,11 @@ static void i915_vma_destroy(struct i915_vma *vma)
	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_unlink_ctx(struct i915_vma *vma)
{
	struct i915_gem_context *ctx = vma->ctx;

	if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
		cancel_work_sync(&ctx->vma_lut.resize);
		ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	}

	__hlist_del(&vma->ctx_node);
	ctx->vma_lut.ht_count--;

	if (i915_vma_is_ggtt(vma))
		vma->obj->vma_hashed = NULL;
	vma->ctx = NULL;

	i915_vma_put(vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	if (vma->ctx)
		i915_vma_unlink_ctx(vma);

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

@@ -680,9 +664,8 @@ int i915_vma_unbind(struct i915_vma *vma)
		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

@@ -112,13 +112,9 @@ struct i915_vma {
	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct drm_i915_gem_exec_object2 *exec_entry;
	unsigned int *exec_flags;
	struct hlist_node exec_node;
	u32 exec_handle;

	struct i915_gem_context *ctx;
	struct hlist_node ctx_node;
	u32 ctx_handle;
};

struct i915_vma *
@@ -114,6 +114,8 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_plane_state *state = &intel_state->base;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int ret;

	/*
@@ -173,6 +175,19 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
	if (ret)
		return ret;

	/*
	 * Y-tiling is not supported in IF-ID Interlace mode in
	 * GEN9 and above.
	 */
	if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		    state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
			DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
			return -EINVAL;
		}
	}

	/* FIXME pre-g4x don't work like this */
	if (intel_state->base.visible)
		crtc_state->active_planes |= BIT(intel_plane->id);
@@ -1190,6 +1190,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
	if (is_dvi) {
		info->alternate_ddc_pin = ddc_pin;

		/*
		 * All VBTs that we got so far for B Stepping has this
		 * information wrong for Port D. So, let's just ignore for now.
		 */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
		    port == PORT_D) {
			info->alternate_ddc_pin = 0;
		}

		sanitize_ddc_pin(dev_priv, port);
	}

@@ -41,6 +41,22 @@
|
||||
|
||||
#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
|
||||
|
||||
/* Post offset values for RGB->YCBCR conversion */
|
||||
#define POSTOFF_RGB_TO_YUV_HI 0x800
|
||||
#define POSTOFF_RGB_TO_YUV_ME 0x100
|
||||
#define POSTOFF_RGB_TO_YUV_LO 0x800
|
||||
|
||||
/*
|
||||
* These values are direct register values specified in the Bspec,
|
||||
* for RGB->YUV conversion matrix (colorspace BT709)
|
||||
*/
|
||||
#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
|
||||
#define CSC_RGB_TO_YUV_BU 0x37e80000
|
||||
#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
|
||||
#define CSC_RGB_TO_YUV_BY 0xb5280000
|
||||
#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
|
||||
#define CSC_RGB_TO_YUV_BV 0x1e080000
|
||||
|
||||
/*
|
||||
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
|
||||
* format). This macro takes the coefficient we want transformed and the
|
||||
@@ -91,6 +107,30 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
|
||||
}
|
||||
}
|
||||
|
||||
void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
|
||||
{
|
||||
int pipe = intel_crtc->pipe;
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
|
||||
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
|
||||
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
|
||||
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
|
||||
|
||||
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
|
||||
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
|
||||
|
||||
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
|
||||
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
|
||||
|
||||
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
|
||||
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
|
||||
|
||||
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
|
||||
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
|
||||
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
|
||||
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
|
||||
}
|
||||
|
||||
/* Set up the pipe CSC unit. */
|
||||
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
|
||||
{
|
||||
@@ -101,7 +141,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
|
||||
uint16_t coeffs[9] = { 0, };
|
||||
struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
|
||||
|
||||
if (crtc_state->ctm) {
|
||||
if (intel_crtc_state->ycbcr420) {
|
||||
i9xx_load_ycbcr_conversion_matrix(intel_crtc);
|
||||
return;
|
||||
} else if (crtc_state->ctm) {
|
||||
struct drm_color_ctm *ctm =
|
||||
(struct drm_color_ctm *)crtc_state->ctm->data;
|
||||
uint64_t input[9] = { 0, };
|
||||
@@ -616,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
|
||||
IS_BROXTON(dev_priv)) {
|
||||
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
|
||||
dev_priv->display.load_luts = broadwell_load_luts;
|
||||
} else if (IS_GEMINILAKE(dev_priv)) {
|
||||
} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
|
||||
dev_priv->display.load_luts = glk_load_luts;
|
||||
} else {
|
||||
|
@@ -802,12 +802,10 @@ void intel_crt_reset(struct drm_encoder *encoder)
|
||||
*/
|
||||
|
||||
static const struct drm_connector_funcs intel_crt_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_crt_destroy,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
};
|
||||
|
@@ -1103,6 +1103,62 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
|
||||
return dco_freq / (p0 * p1 * p2 * 5);
|
||||
}
|
||||
|
||||
static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
|
||||
uint32_t pll_id)
|
||||
{
|
||||
uint32_t cfgcr0, cfgcr1;
|
||||
uint32_t p0, p1, p2, dco_freq, ref_clock;
|
||||
|
||||
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
|
||||
cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
|
||||
|
||||
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
|
||||
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
|
||||
|
||||
if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
|
||||
p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
|
||||
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
|
||||
else
|
||||
p1 = 1;
|
||||
|
||||
|
||||
switch (p0) {
|
||||
case DPLL_CFGCR1_PDIV_2:
|
||||
p0 = 2;
|
||||
break;
|
||||
case DPLL_CFGCR1_PDIV_3:
|
||||
p0 = 3;
|
||||
break;
|
||||
case DPLL_CFGCR1_PDIV_5:
|
||||
p0 = 5;
|
||||
break;
|
||||
case DPLL_CFGCR1_PDIV_7:
|
||||
p0 = 7;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (p2) {
|
||||
case DPLL_CFGCR1_KDIV_1:
|
||||
p2 = 1;
|
||||
break;
|
||||
case DPLL_CFGCR1_KDIV_2:
|
||||
p2 = 2;
|
||||
break;
|
||||
case DPLL_CFGCR1_KDIV_4:
|
||||
p2 = 4;
|
||||
break;
|
||||
}
|
||||
|
||||
ref_clock = dev_priv->cdclk.hw.ref;
|
||||
|
||||
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
|
||||
|
||||
dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
|
||||
DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
|
||||
|
||||
return dco_freq / (p0 * p1 * p2 * 5);
|
||||
}
|
||||
|
||||
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
int dotclock;
|
||||
@@ -1118,12 +1174,68 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
|
||||
else
|
||||
dotclock = pipe_config->port_clock;
|
||||
|
||||
if (pipe_config->ycbcr420)
|
||||
dotclock *= 2;
|
||||
|
||||
if (pipe_config->pixel_multiplier)
|
||||
dotclock /= pipe_config->pixel_multiplier;
|
||||
|
||||
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
|
||||
}
|
||||
|
||||
static void cnl_ddi_clock_get(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
int link_clock = 0;
|
||||
uint32_t cfgcr0, pll_id;
|
||||
|
||||
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
|
||||
|
||||
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
|
||||
|
||||
if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
|
||||
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
|
||||
} else {
|
||||
link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
|
||||
|
||||
switch (link_clock) {
|
||||
case DPLL_CFGCR0_LINK_RATE_810:
|
||||
link_clock = 81000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_1080:
|
||||
link_clock = 108000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_1350:
|
||||
link_clock = 135000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_1620:
|
||||
link_clock = 162000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_2160:
|
||||
link_clock = 216000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_2700:
|
||||
link_clock = 270000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_3240:
|
||||
link_clock = 324000;
|
||||
break;
|
||||
case DPLL_CFGCR0_LINK_RATE_4050:
|
||||
link_clock = 405000;
|
||||
break;
|
||||
default:
|
||||
WARN(1, "Unsupported link rate\n");
|
||||
break;
|
||||
}
|
||||
link_clock *= 2;
|
||||
}
|
||||
|
||||
pipe_config->port_clock = link_clock;
|
||||
|
||||
ddi_dotclock_get(pipe_config);
|
||||
}
|
||||
|
||||
static void skl_ddi_clock_get(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
@@ -1267,6 +1379,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
|
||||
skl_ddi_clock_get(encoder, pipe_config);
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
bxt_ddi_clock_get(encoder, pipe_config);
|
||||
else if (IS_CANNONLAKE(dev_priv))
|
||||
cnl_ddi_clock_get(encoder, pipe_config);
|
||||
}
|
||||
|
||||
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
|
||||
@@ -1868,9 +1982,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
|
||||
if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
|
||||
width = intel_dp->lane_count;
|
||||
rate = intel_dp->link_rate;
|
||||
} else {
|
||||
} else if (type == INTEL_OUTPUT_HDMI) {
|
||||
width = 4;
|
||||
/* Rate is always < than 6GHz for HDMI */
|
||||
} else {
|
||||
MISSING_CASE(type);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -363,7 +363,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
|
||||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
|
||||
(dev_priv->pch_type == PCH_CPT &&
|
||||
(HAS_PCH_CPT(dev_priv) &&
|
||||
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
|
||||
DRM_INFO("Display fused off, disabling\n");
|
||||
info->num_pipes = 0;
|
||||
|
File diff suppressed because it is too large
@@ -97,6 +97,9 @@ static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
|
||||
324000, 432000, 540000 };
|
||||
static const int skl_rates[] = { 162000, 216000, 270000,
|
||||
324000, 432000, 540000 };
|
||||
static const int cnl_rates[] = { 162000, 216000, 270000,
|
||||
324000, 432000, 540000,
|
||||
648000, 810000 };
|
||||
static const int default_rates[] = { 162000, 270000, 540000 };
|
||||
|
||||
/**
|
||||
@@ -229,8 +232,10 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
|
||||
enum port port = dig_port->port;
|
||||
const int *source_rates;
|
||||
int size;
|
||||
u32 voltage;
|
||||
|
||||
/* This should only be done once */
|
||||
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
|
||||
@@ -238,6 +243,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
|
||||
if (IS_GEN9_LP(dev_priv)) {
|
||||
source_rates = bxt_rates;
|
||||
size = ARRAY_SIZE(bxt_rates);
|
||||
} else if (IS_CANNONLAKE(dev_priv)) {
|
||||
source_rates = cnl_rates;
|
||||
size = ARRAY_SIZE(cnl_rates);
|
||||
voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
|
||||
if (port == PORT_A || port == PORT_D ||
|
||||
voltage == VOLTAGE_INFO_0_85V)
|
||||
size -= 2;
|
||||
} else if (IS_GEN9_BC(dev_priv)) {
|
||||
source_rates = skl_rates;
|
||||
size = ARRAY_SIZE(skl_rates);
|
||||
@@ -322,19 +334,20 @@ static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp)
|
||||
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
|
||||
uint8_t lane_count)
|
||||
{
|
||||
/*
|
||||
* FIXME: we need to synchronize the current link parameters with
|
||||
* hardware readout. Currently fast link training doesn't work on
|
||||
* boot-up.
|
||||
*/
|
||||
if (intel_dp->link_rate == 0 ||
|
||||
intel_dp->link_rate > intel_dp->max_link_rate)
|
||||
if (link_rate == 0 ||
|
||||
link_rate > intel_dp->max_link_rate)
|
||||
return false;
|
||||
|
||||
if (intel_dp->lane_count == 0 ||
|
||||
intel_dp->lane_count > intel_dp_max_lane_count(intel_dp))
|
||||
if (lane_count == 0 ||
|
||||
lane_count > intel_dp_max_lane_count(intel_dp))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
@@ -1606,6 +1619,23 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
|
||||
return bpp;
|
||||
}
|
||||
|
||||
static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
|
||||
struct drm_display_mode *m2)
|
||||
{
|
||||
bool bres = false;
|
||||
|
||||
if (m1 && m2)
|
||||
bres = (m1->hdisplay == m2->hdisplay &&
|
||||
m1->hsync_start == m2->hsync_start &&
|
||||
m1->hsync_end == m2->hsync_end &&
|
||||
m1->htotal == m2->htotal &&
|
||||
m1->vdisplay == m2->vdisplay &&
|
||||
m1->vsync_start == m2->vsync_start &&
|
||||
m1->vsync_end == m2->vsync_end &&
|
||||
m1->vtotal == m2->vtotal);
|
||||
return bres;
|
||||
}
|
||||
|
||||
bool
|
||||
intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
@@ -1652,8 +1682,16 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
|
||||
|
||||
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
|
||||
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
|
||||
adjusted_mode);
|
||||
struct drm_display_mode *panel_mode =
|
||||
intel_connector->panel.alt_fixed_mode;
|
||||
struct drm_display_mode *req_mode = &pipe_config->base.mode;
|
||||
|
||||
if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
|
||||
panel_mode = intel_connector->panel.fixed_mode;
|
||||
|
||||
drm_mode_debug_printmodeline(panel_mode);
|
||||
|
||||
intel_fixed_panel_mode(panel_mode, adjusted_mode);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
int ret;
|
||||
@@ -1677,12 +1715,18 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
|
||||
int index;
|
||||
|
||||
index = intel_dp_rate_index(intel_dp->common_rates,
|
||||
intel_dp->num_common_rates,
|
||||
intel_dp->compliance.test_link_rate);
|
||||
if (index >= 0)
|
||||
min_clock = max_clock = index;
|
||||
min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
|
||||
/* Validate the compliance test data since max values
|
||||
* might have changed due to link train fallback.
|
||||
*/
|
||||
if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
|
||||
intel_dp->compliance.test_lane_count)) {
|
||||
index = intel_dp_rate_index(intel_dp->common_rates,
|
||||
intel_dp->num_common_rates,
|
||||
intel_dp->compliance.test_link_rate);
|
||||
if (index >= 0)
|
||||
min_clock = max_clock = index;
|
||||
min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
|
||||
}
|
||||
}
|
||||
DRM_DEBUG_KMS("DP link computation with max lane count %i "
|
||||
"max bw %d pixel clock %iKHz\n",
|
||||
@@ -3963,8 +4007,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
|
||||
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
|
||||
{
|
||||
int status = 0;
|
||||
int min_lane_count = 1;
|
||||
int link_rate_index, test_link_rate;
|
||||
int test_link_rate;
|
||||
uint8_t test_lane_count, test_link_bw;
|
||||
/* (DP CTS 1.2)
|
||||
* 4.3.1.11
|
||||
@@ -3978,10 +4021,6 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
|
||||
return DP_TEST_NAK;
|
||||
}
|
||||
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
|
||||
/* Validate the requested lane count */
|
||||
if (test_lane_count < min_lane_count ||
|
||||
test_lane_count > intel_dp->max_link_lane_count)
|
||||
return DP_TEST_NAK;
|
||||
|
||||
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
|
||||
&test_link_bw);
|
||||
@@ -3989,12 +4028,11 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
|
||||
DRM_DEBUG_KMS("Link Rate read failed\n");
|
||||
return DP_TEST_NAK;
|
||||
}
|
||||
/* Validate the requested link rate */
|
||||
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
|
||||
link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
|
||||
intel_dp->num_common_rates,
|
||||
test_link_rate);
|
||||
if (link_rate_index < 0)
|
||||
|
||||
/* Validate the requested link rate and lane count */
|
||||
if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
|
||||
test_lane_count))
|
||||
return DP_TEST_NAK;
|
||||
|
||||
intel_dp->compliance.test_lane_count = test_lane_count;
|
||||
@@ -4263,7 +4301,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
|
||||
* Validate the cached values of intel_dp->link_rate and
|
||||
* intel_dp->lane_count before attempting to retrain.
|
||||
*/
|
||||
if (!intel_dp_link_params_valid(intel_dp))
|
||||
if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
|
||||
intel_dp->lane_count))
|
||||
return;
|
||||
|
||||
/* Retrain if Channel EQ or CR not ok */
|
||||
@@ -4418,8 +4457,6 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
u32 bit;
|
||||
|
||||
switch (port->port) {
|
||||
case PORT_A:
|
||||
return true;
|
||||
case PORT_B:
|
||||
bit = SDE_PORTB_HOTPLUG;
|
||||
break;
|
||||
@@ -4443,8 +4480,6 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
u32 bit;
|
||||
|
||||
switch (port->port) {
|
||||
case PORT_A:
|
||||
return true;
|
||||
case PORT_B:
|
||||
bit = SDE_PORTB_HOTPLUG_CPT;
|
||||
break;
|
||||
@@ -4454,12 +4489,28 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
case PORT_D:
|
||||
bit = SDE_PORTD_HOTPLUG_CPT;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port->port);
|
||||
return false;
|
||||
}
|
||||
|
||||
return I915_READ(SDEISR) & bit;
|
||||
}
|
||||
|
||||
static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
u32 bit;
|
||||
|
||||
switch (port->port) {
|
||||
case PORT_A:
|
||||
bit = SDE_PORTA_HOTPLUG_SPT;
|
||||
break;
|
||||
case PORT_E:
|
||||
bit = SDE_PORTE_HOTPLUG_SPT;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port->port);
|
||||
return false;
|
||||
return cpt_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
return I915_READ(SDEISR) & bit;
|
||||
@@ -4511,6 +4562,42 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
return I915_READ(PORT_HOTPLUG_STAT) & bit;
|
||||
}
|
||||
|
||||
static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
if (port->port == PORT_A)
|
||||
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
|
||||
else
|
||||
return ibx_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
if (port->port == PORT_A)
|
||||
return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
|
||||
else
|
||||
return cpt_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
if (port->port == PORT_A)
|
||||
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
|
||||
else
|
||||
return cpt_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
if (port->port == PORT_A)
|
||||
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
|
||||
else
|
||||
return cpt_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *intel_dig_port)
|
||||
{
|
||||
@@ -4518,7 +4605,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
enum port port;
|
||||
u32 bit;
|
||||
|
||||
intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
|
||||
port = intel_hpd_pin_to_port(intel_encoder->hpd_pin);
|
||||
switch (port) {
|
||||
case PORT_A:
|
||||
bit = BXT_DE_PORT_HP_DDIA;
|
||||
@@ -4547,16 +4634,25 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port)
|
||||
{
|
||||
if (HAS_PCH_IBX(dev_priv))
|
||||
return ibx_digital_port_connected(dev_priv, port);
|
||||
else if (HAS_PCH_SPLIT(dev_priv))
|
||||
return cpt_digital_port_connected(dev_priv, port);
|
||||
if (HAS_GMCH_DISPLAY(dev_priv)) {
|
||||
if (IS_GM45(dev_priv))
|
||||
return gm45_digital_port_connected(dev_priv, port);
|
||||
else
|
||||
return g4x_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
if (IS_GEN5(dev_priv))
|
||||
return ilk_digital_port_connected(dev_priv, port);
|
||||
else if (IS_GEN6(dev_priv))
|
||||
return snb_digital_port_connected(dev_priv, port);
|
||||
else if (IS_GEN7(dev_priv))
|
||||
return ivb_digital_port_connected(dev_priv, port);
|
||||
else if (IS_GEN8(dev_priv))
|
||||
return bdw_digital_port_connected(dev_priv, port);
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
return bxt_digital_port_connected(dev_priv, port);
|
||||
else if (IS_GM45(dev_priv))
|
||||
return gm45_digital_port_connected(dev_priv, port);
|
||||
else
|
||||
return g4x_digital_port_connected(dev_priv, port);
|
||||
return spt_digital_port_connected(dev_priv, port);
|
||||
}
|
||||
|
||||
static struct edid *
|
||||
@@ -4950,10 +5046,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs intel_dp_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.force = intel_dp_force,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_get_property = intel_digital_connector_atomic_get_property,
|
||||
.atomic_set_property = intel_digital_connector_atomic_set_property,
|
||||
.late_register = intel_dp_connector_register,
|
||||
@@ -5121,12 +5215,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
|
||||
PANEL_POWER_DOWN_DELAY_SHIFT;
|
||||
|
||||
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
|
||||
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
|
||||
BXT_POWER_CYCLE_DELAY_SHIFT;
|
||||
if (tmp > 0)
|
||||
seq->t11_t12 = (tmp - 1) * 1000;
|
||||
else
|
||||
seq->t11_t12 = 0;
|
||||
seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
|
||||
BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
|
||||
} else {
|
||||
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
|
||||
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
|
||||
@@ -5177,6 +5267,21 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
|
||||
intel_pps_dump_state("cur", &cur);
|
||||
|
||||
vbt = dev_priv->vbt.edp.pps;
|
||||
/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
|
||||
* of 500ms appears to be too short. Ocassionally the panel
|
||||
* just fails to power back on. Increasing the delay to 800ms
|
||||
* seems sufficient to avoid this problem.
|
||||
*/
|
||||
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
|
||||
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
|
||||
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
|
||||
vbt.t11_t12);
|
||||
}
|
||||
/* T11_T12 delay is special and actually in units of 100ms, but zero
|
||||
* based in the hw (so we need to add 100 ms). But the sw vbt
|
||||
* table multiplies it with 1000 to make it in units of 100usec,
|
||||
* too. */
|
||||
vbt.t11_t12 += 100 * 10;
|
||||
|
||||
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
|
||||
* our hw here, which are all in 100usec. */
|
||||
@@ -5280,7 +5385,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
|
||||
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
|
||||
pp_div = I915_READ(regs.pp_ctrl);
|
||||
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
|
||||
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
|
||||
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
|
||||
<< BXT_POWER_CYCLE_DELAY_SHIFT);
|
||||
} else {
|
||||
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
|
||||
@@ -5714,6 +5819,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
struct drm_device *dev = intel_encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_display_mode *fixed_mode = NULL;
|
||||
struct drm_display_mode *alt_fixed_mode = NULL;
|
||||
struct drm_display_mode *downclock_mode = NULL;
|
||||
bool has_dpcd;
|
||||
struct drm_display_mode *scan;
|
||||
@@ -5769,13 +5875,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
}
|
||||
intel_connector->edid = edid;
|
||||
|
||||
/* prefer fixed mode from EDID if available */
|
||||
/* prefer fixed mode from EDID if available, save an alt mode also */
|
||||
list_for_each_entry(scan, &connector->probed_modes, head) {
|
||||
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
|
||||
fixed_mode = drm_mode_duplicate(dev, scan);
|
||||
downclock_mode = intel_dp_drrs_init(
|
||||
intel_connector, fixed_mode);
|
||||
break;
|
||||
} else if (!alt_fixed_mode) {
|
||||
alt_fixed_mode = drm_mode_duplicate(dev, scan);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5812,7 +5919,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
pipe_name(pipe));
|
||||
}
|
||||
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
|
||||
downclock_mode);
|
||||
intel_connector->panel.backlight.power = intel_edp_backlight_power;
|
||||
intel_panel_setup_backlight(connector, pipe);
|
||||
|
||||
@@ -5838,26 +5946,22 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
|
||||
struct intel_encoder *encoder = &intel_dig_port->base;
|
||||
struct intel_dp *intel_dp = &intel_dig_port->dp;
|
||||
|
||||
encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port);
|
||||
|
||||
switch (intel_dig_port->port) {
|
||||
case PORT_A:
|
||||
encoder->hpd_pin = HPD_PORT_A;
|
||||
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
|
||||
break;
|
||||
case PORT_B:
|
||||
encoder->hpd_pin = HPD_PORT_B;
|
||||
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
|
||||
break;
|
||||
case PORT_C:
|
||||
encoder->hpd_pin = HPD_PORT_C;
|
||||
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
|
||||
break;
|
||||
case PORT_D:
|
||||
encoder->hpd_pin = HPD_PORT_D;
|
||||
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
|
||||
break;
|
||||
case PORT_E:
|
||||
encoder->hpd_pin = HPD_PORT_E;
|
||||
|
||||
/* FIXME: Check VBT for actual wiring of PORT E */
|
||||
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
|
||||
break;
|
||||
|
@@ -98,13 +98,87 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Set PWM Frequency divider to match desired frequency in vbt.
|
||||
* The PWM Frequency is calculated as 27Mhz / (F x P).
|
||||
* - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the
|
||||
* EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h)
|
||||
* - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
|
||||
* EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
|
||||
*/
|
||||
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
|
||||
int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
|
||||
u8 pn, pn_min, pn_max;
|
||||
|
||||
/* Find desired value of (F x P)
|
||||
* Note that, if F x P is out of supported range, the maximum value or
|
||||
* minimum value will applied automatically. So no need to check that.
|
||||
*/
|
||||
freq = dev_priv->vbt.backlight.pwm_freq_hz;
|
||||
DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
|
||||
if (!freq) {
|
||||
DRM_DEBUG_KMS("Use panel default backlight frequency\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
|
||||
|
||||
/* Use highest possible value of Pn for more granularity of brightness
|
||||
* adjustment while satifying the conditions below.
|
||||
* - Pn is in the range of Pn_min and Pn_max
|
||||
* - F is in the range of 1 and 255
|
||||
* - FxP is within 25% of desired value.
|
||||
* Note: 25% is arbitrary value and may need some tweak.
|
||||
*/
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux,
|
||||
DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
|
||||
DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
|
||||
return false;
|
||||
}
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux,
|
||||
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
|
||||
DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
|
||||
return false;
|
||||
}
|
||||
pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
|
||||
pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
|
||||
|
||||
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
|
||||
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
|
||||
if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
|
||||
DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
for (pn = pn_max; pn >= pn_min; pn--) {
|
||||
f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
|
||||
fxp_actual = f << pn;
|
||||
if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
|
||||
break;
|
||||
}
|
||||
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
|
||||
DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
|
||||
return false;
|
||||
}
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
|
||||
DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_connector *connector = to_intel_connector(conn_state->connector);
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
|
||||
uint8_t dpcd_buf = 0;
|
||||
uint8_t edp_backlight_mode = 0;
|
||||
uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
|
||||
|
||||
if (drm_dp_dpcd_readb(&intel_dp->aux,
|
||||
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
|
||||
@@ -113,18 +187,15 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
|
||||
return;
|
||||
}
|
||||
|
||||
new_dpcd_buf = dpcd_buf;
|
||||
edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
|
||||
|
||||
switch (edp_backlight_mode) {
|
||||
case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM:
|
||||
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET:
|
||||
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
|
||||
dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
|
||||
dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) {
|
||||
DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
|
||||
}
|
||||
new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
|
||||
new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
|
||||
break;
|
||||
|
||||
/* Do nothing when it is already DPCD mode */
|
||||
@@ -133,6 +204,17 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
|
||||
break;
|
||||
}
|
||||
|
||||
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
|
||||
if (intel_dp_aux_set_pwm_freq(connector))
|
||||
new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
|
||||
|
||||
if (new_dpcd_buf != dpcd_buf) {
|
||||
if (drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
|
||||
DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
|
||||
}
|
||||
}
|
||||
|
||||
set_aux_backlight_enable(intel_dp, true);
|
||||
intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
|
||||
}
|
||||
|
@@ -321,12 +321,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
||||
if (!intel_dp_link_training_channel_equalization(intel_dp))
|
||||
goto failure_handling;
|
||||
|
||||
DRM_DEBUG_KMS("Link Training Passed at Link Rate = %d, Lane count = %d",
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
|
||||
intel_connector->base.base.id,
|
||||
intel_connector->base.name,
|
||||
intel_dp->link_rate, intel_dp->lane_count);
|
||||
return;
|
||||
|
||||
failure_handling:
|
||||
DRM_DEBUG_KMS("Link Training failed at link rate = %d, lane count = %d",
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
|
||||
intel_connector->base.base.id,
|
||||
intel_connector->base.name,
|
||||
intel_dp->link_rate, intel_dp->lane_count);
|
||||
if (!intel_dp_get_link_train_fallback_values(intel_dp,
|
||||
intel_dp->link_rate,
|
||||
|
@@ -346,10 +346,8 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.detect = intel_dp_mst_detect,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_dp_mst_connector_destroy,
|
||||
@@ -372,6 +370,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
|
||||
int bpp = 24; /* MST uses fixed bpp */
|
||||
int max_rate, mode_rate, max_lanes, max_link_clock;
|
||||
|
||||
if (!intel_dp)
|
||||
return MODE_ERROR;
|
||||
|
||||
max_link_clock = intel_dp_max_link_rate(intel_dp);
|
||||
max_lanes = intel_dp_max_lane_count(intel_dp);
|
||||
|
||||
@@ -443,28 +444,6 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
|
||||
return false;
|
||||
}
|
||||
|
||||
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
|
||||
{
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
|
||||
if (dev_priv->fbdev)
|
||||
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
|
||||
&connector->base);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
|
||||
{
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
|
||||
if (dev_priv->fbdev)
|
||||
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
|
||||
&connector->base);
|
||||
#endif
|
||||
}
|
||||
|
||||
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
|
||||
{
|
||||
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
|
||||
@@ -500,31 +479,32 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
|
||||
|
||||
static void intel_dp_register_mst_connector(struct drm_connector *connector)
|
||||
{
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
intel_connector_add_to_fbdev(intel_connector);
|
||||
drm_modeset_unlock_all(dev);
|
||||
if (dev_priv->fbdev)
|
||||
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
|
||||
connector);
|
||||
|
||||
drm_connector_register(&intel_connector->base);
|
||||
drm_connector_register(connector);
|
||||
}
|
||||
|
||||
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct intel_connector *intel_connector = to_intel_connector(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
|
||||
drm_connector_unregister(connector);
|
||||
|
||||
/* need to nuke the connector */
|
||||
drm_modeset_lock_all(dev);
|
||||
intel_connector_remove_from_fbdev(intel_connector);
|
||||
if (dev_priv->fbdev)
|
||||
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
|
||||
connector);
|
||||
/* prevent race with the check in ->detect */
|
||||
drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL);
|
||||
intel_connector->mst_port = NULL;
|
||||
drm_modeset_unlock_all(dev);
|
||||
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
|
||||
|
||||
drm_connector_unreference(&intel_connector->base);
|
||||
drm_connector_unreference(connector);
|
||||
DRM_DEBUG_KMS("\n");
|
||||
}
|
||||
|
||||
|
@@ -2379,6 +2379,15 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
|
||||
return pll;
|
||||
}
|
||||
|
||||
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
|
||||
struct intel_dpll_hw_state *hw_state)
|
||||
{
|
||||
DRM_DEBUG_KMS("dpll_hw_state: "
|
||||
"cfgcr0: 0x%x, cfgcr1: 0x%x\n",
|
||||
hw_state->cfgcr0,
|
||||
hw_state->cfgcr1);
|
||||
}
|
||||
|
||||
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
|
||||
.enable = cnl_ddi_pll_enable,
|
||||
.disable = cnl_ddi_pll_disable,
|
||||
@@ -2395,7 +2404,7 @@ static const struct dpll_info cnl_plls[] = {
|
||||
static const struct intel_dpll_mgr cnl_pll_mgr = {
|
||||
.dpll_info = cnl_plls,
|
||||
.get_dpll = cnl_get_dpll,
|
||||
.dump_hw_state = skl_dump_hw_state,
|
||||
.dump_hw_state = cnl_dump_hw_state,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -265,6 +265,7 @@ struct intel_encoder {
|
||||
|
||||
struct intel_panel {
|
||||
struct drm_display_mode *fixed_mode;
|
||||
struct drm_display_mode *alt_fixed_mode;
|
||||
struct drm_display_mode *downclock_mode;
|
||||
|
||||
/* backlight */
|
||||
@@ -780,13 +781,15 @@ struct intel_crtc_state {
|
||||
|
||||
/* HDMI High TMDS char rate ratio */
|
||||
bool hdmi_high_tmds_clock_ratio;
|
||||
|
||||
/* output format is YCBCR 4:2:0 */
|
||||
bool ycbcr420;
|
||||
};
|
||||
|
||||
struct intel_crtc {
|
||||
struct drm_crtc base;
|
||||
enum pipe pipe;
|
||||
enum plane plane;
|
||||
u8 lut_r[256], lut_g[256], lut_b[256];
|
||||
/*
|
||||
* Whether the crtc and the connected output pipeline is active. Implies
|
||||
* that crtc->enabled is set, i.e. the current mode configuration has
|
||||
@@ -797,9 +800,6 @@ struct intel_crtc {
|
||||
u8 plane_ids_mask;
|
||||
unsigned long long enabled_power_domains;
|
||||
struct intel_overlay *overlay;
|
||||
struct intel_flip_work *flip_work;
|
||||
|
||||
atomic_t unpin_work_count;
|
||||
|
||||
/* Display surface base address adjustement for pageflips. Note that on
|
||||
* gen4+ this only adjusts up to a tile, offsets within a tile are
|
||||
@@ -1132,24 +1132,6 @@ intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
|
||||
return dev_priv->plane_to_crtc_mapping[plane];
|
||||
}
|
||||
|
||||
struct intel_flip_work {
|
||||
struct work_struct unpin_work;
|
||||
struct work_struct mmio_work;
|
||||
|
||||
struct drm_crtc *crtc;
|
||||
struct i915_vma *old_vma;
|
||||
struct drm_framebuffer *old_fb;
|
||||
struct drm_i915_gem_object *pending_flip_obj;
|
||||
struct drm_pending_vblank_event *event;
|
||||
atomic_t pending;
|
||||
u32 flip_count;
|
||||
u32 gtt_offset;
|
||||
struct drm_i915_gem_request *flip_queued_req;
|
||||
u32 flip_queued_vblank;
|
||||
u32 flip_ready_vblank;
|
||||
unsigned int rotation;
|
||||
};
|
||||
|
||||
struct intel_load_detect_pipe {
|
||||
struct drm_atomic_state *restore_state;
|
||||
};
|
||||
@@ -1211,12 +1193,12 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
|
||||
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool enable);
|
||||
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
enum transcoder pch_transcoder,
|
||||
enum pipe pch_transcoder,
|
||||
bool enable);
|
||||
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe);
|
||||
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
|
||||
enum transcoder pch_transcoder);
|
||||
enum pipe pch_transcoder);
|
||||
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
|
||||
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
|
||||
|
||||
@@ -1251,9 +1233,9 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
|
||||
|
||||
int intel_get_crtc_scanline(struct intel_crtc *crtc);
|
||||
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
unsigned int pipe_mask);
|
||||
u8 pipe_mask);
|
||||
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
unsigned int pipe_mask);
|
||||
u8 pipe_mask);
|
||||
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
@@ -1326,7 +1308,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
|
||||
/* intel_display.c */
|
||||
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
|
||||
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
|
||||
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
|
||||
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
|
||||
void intel_update_rawclk(struct drm_i915_private *dev_priv);
|
||||
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
|
||||
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
|
||||
@@ -1335,7 +1317,6 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
|
||||
const char *name, u32 reg);
|
||||
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
|
||||
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
|
||||
extern const struct drm_plane_funcs intel_plane_funcs;
|
||||
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
|
||||
unsigned int intel_fb_xy_to_linear(int x, int y,
|
||||
const struct intel_plane_state *state,
|
||||
@@ -1408,9 +1389,6 @@ void intel_unpin_fb_vma(struct i915_vma *vma);
|
||||
struct drm_framebuffer *
|
||||
intel_framebuffer_create(struct drm_i915_gem_object *obj,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd);
|
||||
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
|
||||
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
|
||||
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
|
||||
int intel_prepare_plane_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *new_state);
|
||||
void intel_cleanup_plane_fb(struct drm_plane *plane,
|
||||
@@ -1597,7 +1575,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
extern int intel_fbdev_init(struct drm_device *dev);
|
||||
extern void intel_fbdev_initial_config_async(struct drm_device *dev);
|
||||
extern void intel_fbdev_fini(struct drm_device *dev);
|
||||
extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
|
||||
extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
|
||||
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
|
||||
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
|
||||
extern void intel_fbdev_restore_mode(struct drm_device *dev);
|
||||
@@ -1611,7 +1590,11 @@ static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void intel_fbdev_fini(struct drm_device *dev)
|
||||
static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -1696,6 +1679,7 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
|
||||
/* intel_panel.c */
|
||||
int intel_panel_init(struct intel_panel *panel,
|
||||
struct drm_display_mode *fixed_mode,
|
||||
struct drm_display_mode *alt_fixed_mode,
|
||||
struct drm_display_mode *downclock_mode);
|
||||
void intel_panel_fini(struct intel_panel *panel);
|
||||
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
|
||||
@@ -1858,9 +1842,8 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_busy(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_boost(struct drm_i915_private *dev_priv,
|
||||
struct intel_rps_client *rps,
|
||||
unsigned long submitted);
|
||||
void gen6_rps_boost(struct drm_i915_gem_request *rq,
|
||||
struct intel_rps_client *rps);
|
||||
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
|
||||
void g4x_wm_get_hw_state(struct drm_device *dev);
|
||||
void vlv_wm_get_hw_state(struct drm_device *dev);
|
||||
@@ -1902,7 +1885,7 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
|
||||
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void intel_pipe_update_start(struct intel_crtc *crtc);
|
||||
void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
|
||||
void intel_pipe_update_end(struct intel_crtc *crtc);
|
||||
|
||||
/* intel_tv.c */
|
||||
void intel_tv_init(struct drm_i915_private *dev_priv);
|
||||
|
@@ -1653,12 +1653,10 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_dsi_connector_destroy,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_get_property = intel_digital_connector_atomic_get_property,
|
||||
.atomic_set_property = intel_digital_connector_atomic_set_property,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
@@ -1851,7 +1849,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
|
||||
connector->display_info.width_mm = fixed_mode->width_mm;
|
||||
connector->display_info.height_mm = fixed_mode->height_mm;
|
||||
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL);
|
||||
intel_panel_setup_backlight(connector, INVALID_PIPE);
|
||||
|
||||
intel_dsi_add_properties(intel_connector);
|
||||
|
@@ -344,13 +344,11 @@ static void intel_dvo_destroy(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.detect = intel_dvo_detect,
|
||||
.late_register = intel_connector_register,
|
||||
.early_unregister = intel_connector_unregister,
|
||||
.destroy = intel_dvo_destroy,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
};
|
||||
@@ -554,7 +552,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
intel_panel_init(&intel_connector->panel,
|
||||
intel_dvo_get_current_mode(connector),
|
||||
NULL);
|
||||
NULL, NULL);
|
||||
intel_dvo->panel_wants_dither = true;
|
||||
}
|
||||
|
||||
|
@@ -149,6 +149,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
|
||||
switch (INTEL_GEN(dev_priv)) {
|
||||
default:
|
||||
MISSING_CASE(INTEL_GEN(dev_priv));
|
||||
case 10:
|
||||
case 9:
|
||||
return GEN9_LR_CONTEXT_RENDER_SIZE;
|
||||
case 8:
|
||||
@@ -291,11 +292,9 @@ cleanup:
|
||||
*/
|
||||
int intel_engines_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id, err_id;
|
||||
unsigned int mask = 0;
|
||||
int err = 0;
|
||||
int err;
|
||||
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
const struct engine_class_info *class_info =
|
||||
@@ -306,40 +305,30 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
|
||||
init = class_info->init_execlists;
|
||||
else
|
||||
init = class_info->init_legacy;
|
||||
if (!init) {
|
||||
kfree(engine);
|
||||
dev_priv->engine[id] = NULL;
|
||||
continue;
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
err_id = id;
|
||||
|
||||
if (GEM_WARN_ON(!init))
|
||||
goto cleanup;
|
||||
|
||||
err = init(engine);
|
||||
if (err) {
|
||||
err_id = id;
|
||||
if (err)
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
GEM_BUG_ON(!engine->submit_request);
|
||||
mask |= ENGINE_MASK(id);
|
||||
}
|
||||
|
||||
/*
|
||||
* Catch failures to update intel_engines table when the new engines
|
||||
* are added to the driver by a warning and disabling the forgotten
|
||||
* engines.
|
||||
*/
|
||||
if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
|
||||
device_info->ring_mask = mask;
|
||||
|
||||
device_info->num_rings = hweight32(mask);
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
if (id >= err_id)
|
||||
if (id >= err_id) {
|
||||
kfree(engine);
|
||||
else
|
||||
dev_priv->engine[id] = NULL;
|
||||
} else {
|
||||
dev_priv->gt.cleanup_engine(engine);
|
||||
}
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@@ -348,9 +337,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
|
||||
GEM_BUG_ON(!intel_engine_is_idle(engine));
|
||||
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
|
||||
|
||||
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
|
||||
* so long as the semaphore value in the register/page is greater
|
||||
* than the sync value), so whenever we reset the seqno,
|
||||
@@ -1294,6 +1280,10 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
|
||||
if (port_request(&engine->execlist_port[0]))
|
||||
return false;
|
||||
|
||||
/* ELSP is empty, but there are ready requests? */
|
||||
if (READ_ONCE(engine->execlist_first))
|
||||
return false;
|
||||
|
||||
/* Ring stopped? */
|
||||
if (!ring_is_idle(engine))
|
||||
return false;
|
||||
@@ -1340,6 +1330,7 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
|
||||
for_each_engine(engine, i915, id) {
|
||||
intel_engine_disarm_breadcrumbs(engine);
|
||||
i915_gem_batch_pool_fini(&engine->batch_pool);
|
||||
tasklet_kill(&engine->irq_tasklet);
|
||||
engine->no_priolist = false;
|
||||
}
|
||||
}
|
||||
|
@@ -461,6 +461,8 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
|
||||
struct intel_fbc_work *work = &fbc->work;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&fbc->lock));
|
||||
if (WARN_ON(!fbc->enabled))
|
||||
return;
|
||||
|
||||
if (drm_crtc_vblank_get(&crtc->base)) {
|
||||
DRM_ERROR("vblank not available for FBC on pipe %c\n",
|
||||
@@ -1216,7 +1218,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
|
||||
mutex_lock(&fbc->lock);
|
||||
|
||||
/* Maybe we were scheduled twice. */
|
||||
if (fbc->underrun_detected)
|
||||
if (fbc->underrun_detected || !fbc->enabled)
|
||||
goto out;
|
||||
|
||||
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
|
||||
|
@@ -232,7 +232,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
|
||||
|
||||
strcpy(info->fix.id, "inteldrmfb");
|
||||
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
|
||||
info->fbops = &intelfb_ops;
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
@@ -281,27 +280,6 @@ out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** Sets the color ramps on behalf of RandR */
|
||||
static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
|
||||
u16 blue, int regno)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
intel_crtc->lut_r[regno] = red >> 8;
|
||||
intel_crtc->lut_g[regno] = green >> 8;
|
||||
intel_crtc->lut_b[regno] = blue >> 8;
|
||||
}
|
||||
|
||||
static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
u16 *blue, int regno)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
*red = intel_crtc->lut_r[regno] << 8;
|
||||
*green = intel_crtc->lut_g[regno] << 8;
|
||||
*blue = intel_crtc->lut_b[regno] << 8;
|
||||
}
|
||||
|
||||
static struct drm_fb_helper_crtc *
|
||||
intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
|
||||
{
|
||||
@@ -352,14 +330,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
||||
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
|
||||
int i, j;
|
||||
bool *save_enabled;
|
||||
bool fallback = true;
|
||||
bool fallback = true, ret = true;
|
||||
int num_connectors_enabled = 0;
|
||||
int num_connectors_detected = 0;
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
|
||||
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
|
||||
if (!save_enabled)
|
||||
return false;
|
||||
|
||||
drm_modeset_acquire_init(&ctx, 0);
|
||||
|
||||
while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0)
|
||||
drm_modeset_backoff(&ctx);
|
||||
|
||||
memcpy(save_enabled, enabled, count);
|
||||
mask = GENMASK(count - 1, 0);
|
||||
conn_configured = 0;
|
||||
@@ -370,7 +354,6 @@ retry:
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_fb_helper_crtc *new_crtc;
|
||||
struct intel_crtc *intel_crtc;
|
||||
|
||||
fb_conn = fb_helper->connector_info[i];
|
||||
connector = fb_conn->connector;
|
||||
@@ -412,13 +395,6 @@ retry:
|
||||
|
||||
num_connectors_enabled++;
|
||||
|
||||
intel_crtc = to_intel_crtc(connector->state->crtc);
|
||||
for (j = 0; j < 256; j++) {
|
||||
intel_crtc->lut_r[j] = j;
|
||||
intel_crtc->lut_g[j] = j;
|
||||
intel_crtc->lut_b[j] = j;
|
||||
}
|
||||
|
||||
new_crtc = intel_fb_helper_crtc(fb_helper,
|
||||
connector->state->crtc);
|
||||
|
||||
@@ -509,18 +485,18 @@ retry:
|
||||
bail:
|
||||
DRM_DEBUG_KMS("Not using firmware configuration\n");
|
||||
memcpy(enabled, save_enabled, count);
|
||||
kfree(save_enabled);
|
||||
return false;
|
||||
ret = false;
|
||||
}
|
||||
|
||||
drm_modeset_drop_locks(&ctx);
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
|
||||
kfree(save_enabled);
|
||||
return true;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
|
||||
.initial_config = intel_fb_initial_config,
|
||||
.gamma_set = intel_crtc_fb_gamma_set,
|
||||
.gamma_get = intel_crtc_fb_gamma_get,
|
||||
.fb_probe = intelfb_create,
|
||||
};
|
||||
|
||||
@@ -531,8 +507,6 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
|
||||
* trying to rectify all the possible error paths leading here.
|
||||
*/
|
||||
|
||||
drm_fb_helper_unregister_fbi(&ifbdev->helper);
|
||||
|
||||
drm_fb_helper_fini(&ifbdev->helper);
|
||||
|
||||
if (ifbdev->vma) {
|
||||
@@ -720,8 +694,10 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
|
||||
|
||||
/* Due to peculiar init order wrt to hpd handling this is separate. */
|
||||
if (drm_fb_helper_initial_config(&ifbdev->helper,
|
||||
ifbdev->preferred_bpp))
|
||||
intel_fbdev_fini(ifbdev->helper.dev);
|
||||
ifbdev->preferred_bpp)) {
|
||||
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
|
||||
intel_fbdev_fini(to_i915(ifbdev->helper.dev));
|
||||
}
|
||||
}
|
||||
|
||||
void intel_fbdev_initial_config_async(struct drm_device *dev)
|
||||
@@ -744,9 +720,8 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
|
||||
ifbdev->cookie = 0;
|
||||
}
|
||||
|
||||
void intel_fbdev_fini(struct drm_device *dev)
|
||||
void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_fbdev *ifbdev = dev_priv->fbdev;
|
||||
|
||||
if (!ifbdev)
|
||||
@@ -756,8 +731,17 @@ void intel_fbdev_fini(struct drm_device *dev)
|
||||
if (!current_is_async())
|
||||
intel_fbdev_sync(ifbdev);
|
||||
|
||||
drm_fb_helper_unregister_fbi(&ifbdev->helper);
|
||||
}
|
||||
|
||||
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
|
||||
|
||||
if (!ifbdev)
|
||||
return;
|
||||
|
||||
intel_fbdev_destroy(ifbdev);
|
||||
dev_priv->fbdev = NULL;
|
||||
}
|
||||
|
||||
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
|
||||
@@ -813,7 +797,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
|
||||
{
|
||||
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
|
||||
|
||||
if (ifbdev && ifbdev->vma)
|
||||
if (ifbdev)
|
||||
drm_fb_helper_hotplug_event(&ifbdev->helper);
|
||||
}
|
||||
|
||||
|
@@ -313,11 +313,11 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
					   enum transcoder pch_transcoder,
					   enum pipe pch_transcoder,
					   bool enable)
{
	struct intel_crtc *crtc =
		intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
		intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
	unsigned long flags;
	bool old;

@@ -390,7 +390,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 * interrupt to avoid an irq storm.
 */
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
					 enum transcoder pch_transcoder)
					 enum pipe pch_transcoder)
{
	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
						  false)) {
@@ -324,7 +324,7 @@ hangcheck_get_action(struct intel_engine_cs *engine,
	if (engine->hangcheck.seqno != hc->seqno)
		return ENGINE_ACTIVE_SEQNO;

	if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine)))
	if (intel_engine_is_idle(engine))
		return ENGINE_IDLE;

	return engine_stuck(engine, hc->acthd);
@@ -459,22 +459,31 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct drm_connector *connector = &intel_hdmi->attached_connector->base;
	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
	union hdmi_infoframe frame;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						       adjusted_mode);
						       adjusted_mode,
						       is_hdmi2_sink);
	if (ret < 0) {
		DRM_ERROR("couldn't fill AVI infoframe\n");
		return;
	}

	if (crtc_state->ycbcr420)
		frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
	else
		frame.avi.colorspace = HDMI_COLORSPACE_RGB;

	drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
					   crtc_state->limited_color_range ?
					   HDMI_QUANTIZATION_RANGE_LIMITED :
					   HDMI_QUANTIZATION_RANGE_FULL,
					   intel_hdmi->rgb_quant_range_selectable);

	/* TODO: handle pixel repetition for YCBCR420 outputs */
	intel_write_infoframe(encoder, crtc_state, &frame);
}

@@ -1292,6 +1301,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		clock *= 2;

	if (drm_mode_is_420_only(&connector->display_info, mode))
		clock /= 2;

	/* check if we can do 8bpc */
	status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);

@@ -1321,14 +1333,21 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
	if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
		return false;

	for_each_connector_in_state(state, connector, connector_state, i) {
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		const struct drm_display_info *info = &connector->display_info;

		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
			return false;
		if (crtc_state->ycbcr420) {
			const struct drm_hdmi_info *hdmi = &info->hdmi;

			if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
				return false;
		} else {
			if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
				return false;
		}
	}

	/* Display Wa #1139 */
@@ -1339,6 +1358,36 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
	return true;
}

static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
			   struct intel_crtc_state *config,
			   int *clock_12bpc, int *clock_8bpc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);

	if (!connector->ycbcr_420_allowed) {
		DRM_ERROR("Platform doesn't support YCBCR420 output\n");
		return false;
	}

	/* YCBCR420 TMDS rate requirement is half the pixel clock */
	config->port_clock /= 2;
	*clock_12bpc /= 2;
	*clock_8bpc /= 2;
	config->ycbcr420 = true;

	/* YCBCR 420 output conversion needs a scaler */
	if (skl_update_scaler_crtc(config)) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return false;
	}

	intel_pch_panel_fitting(intel_crtc, config,
				DRM_MODE_SCALE_FULLSCREEN);

	return true;
}
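For readers following the clock bookkeeping above: with YCbCr 4:2:0 each pair of pixels shares one chroma sample, so the link only has to carry half the pixel rate, which is why all three clocks are simply halved. A rough, self-contained illustration in plain C (the 594 MHz figure is an assumed 4K@60 example, not taken from the patch):

#include <stdio.h>

/* Mirrors the halving done in intel_hdmi_ycbcr420_config(); illustrative only. */
static void ycbcr420_adjust(int *port_clock, int *clock_12bpc, int *clock_8bpc)
{
	*port_clock /= 2;	/* TMDS character rate is half the pixel clock */
	*clock_12bpc /= 2;
	*clock_8bpc /= 2;
}

int main(void)
{
	int clock_8bpc = 594000;		/* assumed 4K@60 pixel clock, in kHz */
	int clock_12bpc = 594000 * 3 / 2;	/* 12bpc needs 1.5x the TMDS rate */
	int port_clock = clock_8bpc;

	ycbcr420_adjust(&port_clock, &clock_12bpc, &clock_8bpc);
	printf("port %d kHz, 8bpc %d kHz, 12bpc %d kHz\n",
	       port_clock, clock_8bpc, clock_12bpc);
	return 0;
}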
|
||||
|
||||
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
struct drm_connector_state *conn_state)
|
||||
@@ -1346,7 +1395,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
|
||||
struct drm_connector *connector = conn_state->connector;
|
||||
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
|
||||
struct intel_digital_connector_state *intel_conn_state =
|
||||
to_intel_digital_connector_state(conn_state);
|
||||
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
|
||||
@@ -1376,6 +1426,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
clock_12bpc *= 2;
|
||||
}
|
||||
|
||||
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
|
||||
if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
|
||||
&clock_12bpc, &clock_8bpc)) {
|
||||
DRM_ERROR("Can't support YCBCR420 output\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
|
||||
pipe_config->has_pch_encoder = true;
|
||||
|
||||
@@ -1703,11 +1761,9 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.detect = intel_hdmi_detect,
|
||||
.force = intel_hdmi_force,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_get_property = intel_digital_connector_atomic_get_property,
|
||||
.atomic_set_property = intel_digital_connector_atomic_set_property,
|
||||
.late_register = intel_connector_register,
|
||||
@@ -1787,6 +1843,93 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
|
||||
DRM_DEBUG_KMS("sink scrambling handled\n");
|
||||
}
|
||||
|
||||
static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
case PORT_C:
|
||||
ddc_pin = GMBUS_PIN_DPC;
|
||||
break;
|
||||
case PORT_D:
|
||||
ddc_pin = GMBUS_PIN_DPD_CHV;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
}
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
break;
|
||||
case PORT_C:
|
||||
ddc_pin = GMBUS_PIN_2_BXT;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
break;
|
||||
}
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
break;
|
||||
case PORT_C:
|
||||
ddc_pin = GMBUS_PIN_2_BXT;
|
||||
break;
|
||||
case PORT_D:
|
||||
ddc_pin = GMBUS_PIN_4_CNP;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
break;
|
||||
}
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
u8 ddc_pin;
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
case PORT_C:
|
||||
ddc_pin = GMBUS_PIN_DPC;
|
||||
break;
|
||||
case PORT_D:
|
||||
ddc_pin = GMBUS_PIN_DPD;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
}
|
||||
return ddc_pin;
|
||||
}
|
||||
|
||||
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
@@ -1800,32 +1943,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
return info->alternate_ddc_pin;
|
||||
}
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
else
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
case PORT_C:
|
||||
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_2_BXT;
|
||||
else
|
||||
ddc_pin = GMBUS_PIN_DPC;
|
||||
break;
|
||||
case PORT_D:
|
||||
if (HAS_PCH_CNP(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_4_CNP;
|
||||
else if (IS_CHERRYVIEW(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_DPD_CHV;
|
||||
else
|
||||
ddc_pin = GMBUS_PIN_DPD;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
}
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
|
||||
else if (HAS_PCH_CNP(dev_priv))
|
||||
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
|
||||
else
|
||||
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
|
||||
|
||||
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
|
||||
ddc_pin, port_name(port));
|
||||
@@ -1859,25 +1984,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
connector->doublescan_allowed = 0;
|
||||
connector->stereo_allowed = 1;
|
||||
|
||||
if (IS_GEMINILAKE(dev_priv))
|
||||
connector->ycbcr_420_allowed = true;
|
||||
|
||||
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
intel_encoder->hpd_pin = HPD_PORT_B;
|
||||
break;
|
||||
case PORT_C:
|
||||
intel_encoder->hpd_pin = HPD_PORT_C;
|
||||
break;
|
||||
case PORT_D:
|
||||
intel_encoder->hpd_pin = HPD_PORT_D;
|
||||
break;
|
||||
case PORT_E:
|
||||
intel_encoder->hpd_pin = HPD_PORT_E;
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
if (WARN_ON(port == PORT_A))
|
||||
return;
|
||||
}
|
||||
intel_encoder->hpd_pin = intel_hpd_pin(port);
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
|
||||
intel_hdmi->write_infoframe = vlv_write_infoframe;
|
||||
|
@@ -76,26 +76,54 @@
|
||||
* it will use i915_hotplug_work_func where this logic is handled.
|
||||
*/
|
||||
|
||||
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
|
||||
/**
 * intel_hpd_pin_to_port - return port hard associated with certain pin.
 * @pin: the hpd pin to get associated port
 *
 * Return port that is associated with @pin and PORT_NONE if no port is
 * hard associated with that @pin.
 */
enum port intel_hpd_pin_to_port(enum hpd_pin pin)
|
||||
{
|
||||
switch (pin) {
|
||||
case HPD_PORT_A:
|
||||
*port = PORT_A;
|
||||
return true;
|
||||
return PORT_A;
|
||||
case HPD_PORT_B:
|
||||
*port = PORT_B;
|
||||
return true;
|
||||
return PORT_B;
|
||||
case HPD_PORT_C:
|
||||
*port = PORT_C;
|
||||
return true;
|
||||
return PORT_C;
|
||||
case HPD_PORT_D:
|
||||
*port = PORT_D;
|
||||
return true;
|
||||
return PORT_D;
|
||||
case HPD_PORT_E:
|
||||
*port = PORT_E;
|
||||
return true;
|
||||
return PORT_E;
|
||||
default:
|
||||
return false; /* no hpd */
|
||||
return PORT_NONE; /* no port for this pin */
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * intel_hpd_pin - return pin hard associated with certain port.
 * @port: the hpd port to get associated pin
 *
 * Return pin that is associated with @port and HPD_NONE if no pin is
 * hard associated with that @port.
 */
enum hpd_pin intel_hpd_pin(enum port port)
|
||||
{
|
||||
switch (port) {
|
||||
case PORT_A:
|
||||
return HPD_PORT_A;
|
||||
case PORT_B:
|
||||
return HPD_PORT_B;
|
||||
case PORT_C:
|
||||
return HPD_PORT_C;
|
||||
case PORT_D:
|
||||
return HPD_PORT_D;
|
||||
case PORT_E:
|
||||
return HPD_PORT_E;
|
||||
default:
|
||||
MISSING_CASE(port);
|
||||
return HPD_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -389,8 +417,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
|
||||
if (!(BIT(i) & pin_mask))
|
||||
continue;
|
||||
|
||||
is_dig_port = intel_hpd_pin_to_port(i, &port) &&
|
||||
dev_priv->hotplug.irq_port[port];
|
||||
port = intel_hpd_pin_to_port(i);
|
||||
is_dig_port = port != PORT_NONE &&
|
||||
dev_priv->hotplug.irq_port[port];
|
||||
|
||||
if (is_dig_port) {
|
||||
bool long_hpd = long_mask & BIT(i);
|
||||
|
@@ -592,7 +592,6 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
|
||||
int ret;
|
||||
|
||||
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
|
||||
mutex_lock(&dev_priv->gmbus_mutex);
|
||||
|
||||
if (bus->force_bit) {
|
||||
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
|
||||
@@ -604,7 +603,6 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
|
||||
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
|
||||
}
|
||||
|
||||
mutex_unlock(&dev_priv->gmbus_mutex);
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
|
||||
|
||||
return ret;
|
||||
@@ -624,6 +622,39 @@ static const struct i2c_algorithm gmbus_algorithm = {
	.functionality	= gmbus_func
};

static void gmbus_lock_bus(struct i2c_adapter *adapter,
			   unsigned int flags)
{
	struct intel_gmbus *bus = to_intel_gmbus(adapter);
	struct drm_i915_private *dev_priv = bus->dev_priv;

	mutex_lock(&dev_priv->gmbus_mutex);
}

static int gmbus_trylock_bus(struct i2c_adapter *adapter,
			     unsigned int flags)
{
	struct intel_gmbus *bus = to_intel_gmbus(adapter);
	struct drm_i915_private *dev_priv = bus->dev_priv;

	return mutex_trylock(&dev_priv->gmbus_mutex);
}

static void gmbus_unlock_bus(struct i2c_adapter *adapter,
			     unsigned int flags)
{
	struct intel_gmbus *bus = to_intel_gmbus(adapter);
	struct drm_i915_private *dev_priv = bus->dev_priv;

	mutex_unlock(&dev_priv->gmbus_mutex);
}

const struct i2c_lock_operations gmbus_lock_ops = {
	.lock_bus = gmbus_lock_bus,
	.trylock_bus = gmbus_trylock_bus,
	.unlock_bus = gmbus_unlock_bus,
};
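The point of this hunk, together with the gmbus_xfer() hunk just above that drops its explicit mutex_lock/mutex_unlock pair, is that bus locking moves into the adapter's lock_ops: GMBus transfers, the bit-banging fallback and transfers issued through the i2c core all end up serializing on the same gmbus_mutex. A minimal user-space sketch of that indirection, with made-up names (illustrative only, not the kernel API):

#include <stdio.h>
#include <pthread.h>

struct adapter;

struct lock_ops {			/* stands in for struct i2c_lock_operations */
	void (*lock_bus)(struct adapter *adap);
	void (*unlock_bus)(struct adapter *adap);
};

struct adapter {
	const struct lock_ops *lock_ops;
	pthread_mutex_t *shared_lock;	/* plays the role of gmbus_mutex */
};

static void shared_lock_bus(struct adapter *adap)   { pthread_mutex_lock(adap->shared_lock); }
static void shared_unlock_bus(struct adapter *adap) { pthread_mutex_unlock(adap->shared_lock); }

static const struct lock_ops shared_lock_ops = {
	.lock_bus = shared_lock_bus,
	.unlock_bus = shared_unlock_bus,
};

/* The transfer path only calls the installed hooks; callers never take the
 * mutex themselves, which is why the explicit locking could be dropped. */
static void do_transfer(struct adapter *adap, const char *what)
{
	adap->lock_ops->lock_bus(adap);
	printf("%s done under the shared bus lock\n", what);
	adap->lock_ops->unlock_bus(adap);
}

int main(void)
{
	static pthread_mutex_t shared = PTHREAD_MUTEX_INITIALIZER;
	struct adapter ddc_b = { &shared_lock_ops, &shared };
	struct adapter ddc_c = { &shared_lock_ops, &shared };

	do_transfer(&ddc_b, "port B EDID read");
	do_transfer(&ddc_c, "port C EDID read");
	return 0;
}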
|
||||
|
||||
/**
|
||||
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
|
||||
* @dev_priv: i915 device private
|
||||
@@ -665,6 +696,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
|
||||
bus->dev_priv = dev_priv;
|
||||
|
||||
bus->adapter.algo = &gmbus_algorithm;
|
||||
bus->adapter.lock_ops = &gmbus_lock_ops;
|
||||
|
||||
/*
|
||||
* We wish to retry with bit banging
|
||||
|
@@ -1327,6 +1327,31 @@ static void reset_common_ring(struct intel_engine_cs *engine,
|
||||
{
|
||||
struct execlist_port *port = engine->execlist_port;
|
||||
struct intel_context *ce;
|
||||
unsigned int n;
|
||||
|
||||
/*
|
||||
* Catch up with any missed context-switch interrupts.
|
||||
*
|
||||
* Ideally we would just read the remaining CSB entries now that we
|
||||
* know the gpu is idle. However, the CSB registers are sometimes^W
|
||||
* often trashed across a GPU reset! Instead we have to rely on
|
||||
* guessing the missed context-switch events by looking at what
|
||||
* requests were completed.
|
||||
*/
|
||||
if (!request) {
|
||||
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
|
||||
i915_gem_request_put(port_request(&port[n]));
|
||||
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
|
||||
return;
|
||||
}
|
||||
|
||||
if (request->ctx != port_request(port)->ctx) {
|
||||
i915_gem_request_put(port_request(port));
|
||||
port[0] = port[1];
|
||||
memset(&port[1], 0, sizeof(port[1]));
|
||||
}
|
||||
|
||||
GEM_BUG_ON(request->ctx != port_request(port)->ctx);
|
||||
|
||||
/* If the request was innocent, we leave the request in the ELSP
|
||||
* and will try to replay it on restarting. The context image may
|
||||
@@ -1338,7 +1363,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
|
||||
* and have to at least restore the RING register in the context
|
||||
* image back to the expected values to skip over the guilty request.
|
||||
*/
|
||||
if (!request || request->fence.error != -EIO)
|
||||
if (request->fence.error != -EIO)
|
||||
return;
|
||||
|
||||
/* We want a simple context + ring to execute the breadcrumb update.
|
||||
@@ -1360,15 +1385,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
|
||||
request->ring->head = request->postfix;
|
||||
intel_ring_update_space(request->ring);
|
||||
|
||||
/* Catch up with any missed context-switch interrupts */
|
||||
if (request->ctx != port_request(port)->ctx) {
|
||||
i915_gem_request_put(port_request(port));
|
||||
port[0] = port[1];
|
||||
memset(&port[1], 0, sizeof(port[1]));
|
||||
}
|
||||
|
||||
GEM_BUG_ON(request->ctx != port_request(port)->ctx);
|
||||
|
||||
/* Reset WaIdleLiteRestore:bdw,skl as well */
|
||||
request->tail =
|
||||
intel_ring_wrap(request->ring,
|
||||
@@ -2092,7 +2108,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
|
||||
* So to avoid that we reset the context images upon resume. For
|
||||
* simplicity, we just zero everything out.
|
||||
*/
|
||||
list_for_each_entry(ctx, &dev_priv->context_list, link) {
|
||||
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
|
||||
for_each_engine(engine, dev_priv, id) {
|
||||
struct intel_context *ce = &ctx->engine[engine->id];
|
||||
u32 *reg;
|
||||
|
@@ -595,10 +595,8 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
|
||||
};
|
||||
|
||||
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
|
||||
.dpms = drm_atomic_helper_connector_dpms,
|
||||
.detect = intel_lvds_detect,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.set_property = drm_atomic_helper_connector_set_property,
|
||||
.atomic_get_property = intel_digital_connector_atomic_get_property,
|
||||
.atomic_set_property = intel_digital_connector_atomic_set_property,
|
||||
.late_register = intel_connector_register,
|
||||
@@ -1140,7 +1138,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
|
||||
out:
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
|
||||
intel_panel_init(&intel_connector->panel, fixed_mode, NULL,
|
||||
downclock_mode);
|
||||
intel_panel_setup_backlight(connector, INVALID_PIPE);
|
||||
|
||||
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
|
||||
|
@@ -27,6 +27,7 @@
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <acpi/video.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
@@ -829,6 +830,10 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
|
||||
memunmap(opregion->rvda);
|
||||
opregion->rvda = NULL;
|
||||
}
|
||||
if (opregion->vbt_firmware) {
|
||||
kfree(opregion->vbt_firmware);
|
||||
opregion->vbt_firmware = NULL;
|
||||
}
|
||||
opregion->header = NULL;
|
||||
opregion->acpi = NULL;
|
||||
opregion->swsci = NULL;
|
||||
@@ -912,6 +917,43 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
	{ }
};

static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
	struct intel_opregion *opregion = &dev_priv->opregion;
	const struct firmware *fw = NULL;
	const char *name = i915.vbt_firmware;
	int ret;

	if (!name || !*name)
		return -ENOENT;

	ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
	if (ret) {
		DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
			  name, ret);
		return ret;
	}

	if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
		opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (opregion->vbt_firmware) {
			DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
			opregion->vbt = opregion->vbt_firmware;
			opregion->vbt_size = fw->size;
			ret = 0;
		} else {
			ret = -ENOMEM;
		}
	} else {
		DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
		ret = -EINVAL;
	}

	release_firmware(fw);

	return ret;
}
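A hedged usage example (not from the patch itself): request_firmware() resolves the name under the usual firmware search path, typically /lib/firmware, so overriding the VBT amounts to dropping a blob there, e.g. /lib/firmware/my-vbt.bin (file name made up), and booting with i915.vbt_firmware=my-vbt.bin. If intel_bios_is_valid_vbt() rejects the blob, the driver logs "Invalid VBT firmware" and the existing opregion/DMI paths in intel_opregion_setup() take over as before.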
|
||||
|
||||
int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
@@ -974,6 +1016,9 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
if (mboxes & MBOX_ASLE_EXT)
|
||||
DRM_DEBUG_DRIVER("ASLE extension supported\n");
|
||||
|
||||
if (intel_load_vbt_firmware(dev_priv) == 0)
|
||||
goto out;
|
||||
|
||||
if (dmi_check_system(intel_no_opregion_vbt))
|
||||
goto out;
|
||||
|
||||
|
@@ -799,9 +799,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
|
||||
|
||||
vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
|
||||
if (IS_ERR(vma))
|
||||
return PTR_ERR(vma);
|
||||
if (IS_ERR(vma)) {
|
||||
ret = PTR_ERR(vma);
|
||||
goto out_pin_section;
|
||||
}
|
||||
|
||||
ret = i915_vma_put_fence(vma);
|
||||
if (ret)
|
||||
@@ -886,6 +890,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
||||
|
||||
out_unpin:
|
||||
i915_gem_object_unpin_from_display_plane(vma);
|
||||
out_pin_section:
|
||||
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -110,7 +110,8 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
|
||||
|
||||
/* Native modes don't need fitting */
|
||||
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
|
||||
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
|
||||
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
|
||||
!pipe_config->ycbcr420)
|
||||
goto done;
|
||||
|
||||
switch (fitting_mode) {
|
||||
@@ -1919,11 +1920,13 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
|
||||
|
||||
int intel_panel_init(struct intel_panel *panel,
|
||||
struct drm_display_mode *fixed_mode,
|
||||
struct drm_display_mode *alt_fixed_mode,
|
||||
struct drm_display_mode *downclock_mode)
|
||||
{
|
||||
intel_panel_init_backlight_funcs(panel);
|
||||
|
||||
panel->fixed_mode = fixed_mode;
|
||||
panel->alt_fixed_mode = alt_fixed_mode;
|
||||
panel->downclock_mode = downclock_mode;
|
||||
|
||||
return 0;
|
||||
@@ -1937,6 +1940,10 @@ void intel_panel_fini(struct intel_panel *panel)
|
||||
if (panel->fixed_mode)
|
||||
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
|
||||
|
||||
if (panel->alt_fixed_mode)
|
||||
drm_mode_destroy(intel_connector->base.dev,
|
||||
panel->alt_fixed_mode);
|
||||
|
||||
if (panel->downclock_mode)
|
||||
drm_mode_destroy(intel_connector->base.dev,
|
||||
panel->downclock_mode);
|
||||
|
@@ -62,6 +62,20 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
I915_WRITE(CHICKEN_PAR1_1,
|
||||
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
|
||||
|
||||
	/*
	 * Display WA#0390: skl,bxt,kbl,glk
	 *
	 * Must match Sampler, Pixel Back End, and Media
	 * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
	 *
	 * Including bits outside the page in the hash would
	 * require 2 (or 4?) MiB alignment of resources. Just
	 * assume the default hashing mode which only uses bits
	 * within the page.
	 */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);
|
||||
|
||||
I915_WRITE(GEN8_CONFIG0,
|
||||
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
|
||||
|
||||
@@ -78,6 +92,12 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
|
||||
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
|
||||
ILK_DPFC_DISABLE_DUMMY0);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv)) {
|
||||
/* WaDisableDopClockGating */
|
||||
I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
|
||||
& ~GEN7_DOP_CLOCK_GATE_ENABLE);
|
||||
}
|
||||
}
|
||||
|
||||
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
|
||||
@@ -2758,7 +2778,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
|
||||
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
|
||||
uint16_t wm[8])
|
||||
{
|
||||
if (IS_GEN9(dev_priv)) {
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
uint32_t val;
|
||||
int ret, i;
|
||||
int level, max_level = ilk_wm_max_level(dev_priv);
|
||||
@@ -2818,7 +2838,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
/*
|
||||
* WaWmMemoryReadLatency:skl,glk
|
||||
* WaWmMemoryReadLatency:skl+,glk
|
||||
*
|
||||
* punit doesn't take into account the read latency so we need
|
||||
* to add 2us to the various latency levels we retrieve from the
|
||||
@@ -2857,6 +2877,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
|
||||
wm[0] = 7;
|
||||
wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
|
||||
wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
|
||||
} else {
|
||||
MISSING_CASE(INTEL_DEVID(dev_priv));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2912,7 +2934,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
|
||||
* - latencies are in us on gen9.
|
||||
* - before then, WM1+ latency values are in 0.5us units
|
||||
*/
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
latency *= 10;
|
||||
else if (level > 0)
|
||||
latency *= 5;
|
||||
@@ -3530,8 +3552,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
|
||||
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
|
||||
}
|
||||
|
||||
#define SKL_SAGV_BLOCK_TIME 30 /* µs */
|
||||
|
||||
/*
|
||||
* FIXME: We still don't have the proper code detect if we need to apply the WA,
|
||||
* so assume we'll always need it in order to avoid underruns.
|
||||
@@ -3549,7 +3569,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
|
||||
static bool
|
||||
intel_has_sagv(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
|
||||
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
|
||||
IS_CANNONLAKE(dev_priv))
|
||||
return true;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) &&
|
||||
@@ -3655,12 +3676,13 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
|
||||
struct intel_crtc_state *cstate;
|
||||
enum pipe pipe;
|
||||
int level, latency;
|
||||
int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;
|
||||
|
||||
if (!intel_has_sagv(dev_priv))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* SKL workaround: bspec recommends we disable the SAGV when we have
|
||||
* SKL+ workaround: bspec recommends we disable the SAGV when we have
|
||||
* more then one pipe enabled
|
||||
*
|
||||
* If there are no active CRTCs, no additional checks need be performed
|
||||
@@ -3699,11 +3721,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
|
||||
latency += 15;
|
||||
|
||||
/*
|
||||
* If any of the planes on this pipe don't enable wm levels
|
||||
* that incur memory latencies higher then 30µs we can't enable
|
||||
* the SAGV
|
||||
* If any of the planes on this pipe don't enable wm levels that
|
||||
* incur memory latencies higher than sagv_block_time_us we
|
||||
* can't enable the SAGV.
|
||||
*/
|
||||
if (latency < SKL_SAGV_BLOCK_TIME)
|
||||
if (latency < sagv_block_time_us)
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -3837,7 +3859,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return u32_to_fixed_16_16(0);
		return u32_to_fixed16(0);

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	if (plane->id == PLANE_CURSOR) {
@@ -3861,10 +3883,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
		dst_h = drm_rect_height(&pstate->base.dst);
	}

	fp_w_ratio = fixed_16_16_div(src_w, dst_w);
	fp_h_ratio = fixed_16_16_div(src_h, dst_h);
	downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
	downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}
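The renames above (fixed_16_16_div becoming div_fixed16 and so on) are cosmetic; the values are still 16.16 fixed point, a u32 with 16 integer and 16 fractional bits. A small self-contained sketch of the arithmetic these helpers wrap (illustrative only; the driver's uint_fixed_16_16_t helpers round and range-check more carefully):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t fixed16;	/* 16.16 fixed point: value * 65536 */

static fixed16 u32_to_fixed16(uint32_t v)          { return v << 16; }
static fixed16 div_fixed16(uint32_t n, uint32_t d) { return (fixed16)(((uint64_t)n << 16) / d); }
static fixed16 mul_fixed16(fixed16 a, fixed16 b)   { return (fixed16)(((uint64_t)a * b) >> 16); }
static fixed16 max_fixed16(fixed16 a, fixed16 b)   { return a > b ? a : b; }

int main(void)
{
	/* Assumed example: a 3840x2160 source scanned out into a 1920x1080 plane. */
	fixed16 w = max_fixed16(div_fixed16(3840, 1920), u32_to_fixed16(1));
	fixed16 h = max_fixed16(div_fixed16(2160, 1080), u32_to_fixed16(1));
	fixed16 amount = mul_fixed16(w, h);	/* 2.0 * 2.0 = 4.0 in 16.16 */

	printf("downscale amount = %u (raw 0x%x)\n", amount >> 16, amount);
	return 0;
}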
|
||||
@@ -3872,7 +3894,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
|
||||
static uint_fixed_16_16_t
|
||||
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1);
|
||||
uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
|
||||
|
||||
if (!crtc_state->base.enable)
|
||||
return pipe_downscale;
|
||||
@@ -3891,10 +3913,10 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
|
||||
if (!dst_w || !dst_h)
|
||||
return pipe_downscale;
|
||||
|
||||
fp_w_ratio = fixed_16_16_div(src_w, dst_w);
|
||||
fp_h_ratio = fixed_16_16_div(src_h, dst_h);
|
||||
downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
|
||||
downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
|
||||
fp_w_ratio = div_fixed16(src_w, dst_w);
|
||||
fp_h_ratio = div_fixed16(src_h, dst_h);
|
||||
downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
|
||||
downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
|
||||
|
||||
pipe_downscale = mul_fixed16(downscale_w, downscale_h);
|
||||
}
|
||||
@@ -3913,14 +3935,14 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
|
||||
int crtc_clock, dotclk;
|
||||
uint32_t pipe_max_pixel_rate;
|
||||
uint_fixed_16_16_t pipe_downscale;
|
||||
uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1);
|
||||
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
|
||||
|
||||
if (!cstate->base.enable)
|
||||
return 0;
|
||||
|
||||
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
|
||||
uint_fixed_16_16_t plane_downscale;
|
||||
uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8);
|
||||
uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
|
||||
int bpp;
|
||||
|
||||
if (!intel_wm_plane_visible(cstate,
|
||||
@@ -3938,7 +3960,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
|
||||
plane_downscale = mul_fixed16(plane_downscale,
|
||||
fp_9_div_8);
|
||||
|
||||
max_downscale = max_fixed_16_16(plane_downscale, max_downscale);
|
||||
max_downscale = max_fixed16(plane_downscale, max_downscale);
|
||||
}
|
||||
pipe_downscale = skl_pipe_downscale_amount(cstate);
|
||||
|
||||
@@ -4071,7 +4093,9 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
|
||||
|
||||
/* For Non Y-tile return 8-blocks */
|
||||
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
|
||||
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
|
||||
fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
|
||||
fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
|
||||
fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
|
||||
return 8;
|
||||
|
||||
/*
|
||||
@@ -4266,8 +4290,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
					 uint32_t latency)
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
	       uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;
@@ -4276,7 +4301,11 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512);
	ret = div_fixed16(wm_intermediate_val, 1000 * 512);

	if (INTEL_GEN(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}
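To get a feel for the units (numbers assumed purely for illustration): with a 148,500 kHz pixel rate, 4 bytes per pixel and a 15 us latency level, method 1 evaluates to 15 * 148500 * 4 / (1000 * 512) which is roughly 17.4, i.e. about 17.4 blocks of 512 bytes of DDB needed to ride out the memory latency window; the new dev_priv argument exists only so that gen10+ can add one extra block on top of that result.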
|
||||
|
||||
@@ -4294,7 +4323,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
|
||||
wm_intermediate_val = latency * pixel_rate;
|
||||
wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
|
||||
pipe_htotal * 1000);
|
||||
ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
|
||||
ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -4306,15 +4335,15 @@ intel_get_linetime_us(struct intel_crtc_state *cstate)
|
||||
uint_fixed_16_16_t linetime_us;
|
||||
|
||||
if (!cstate->base.active)
|
||||
return u32_to_fixed_16_16(0);
|
||||
return u32_to_fixed16(0);
|
||||
|
||||
pixel_rate = cstate->pixel_rate;
|
||||
|
||||
if (WARN_ON(pixel_rate == 0))
|
||||
return u32_to_fixed_16_16(0);
|
||||
return u32_to_fixed16(0);
|
||||
|
||||
crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
|
||||
linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate);
|
||||
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
|
||||
|
||||
return linetime_us;
|
||||
}
|
||||
@@ -4361,7 +4390,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
uint32_t plane_bytes_per_line;
|
||||
uint32_t res_blocks, res_lines;
|
||||
uint8_t cpp;
|
||||
uint32_t width = 0, height = 0;
|
||||
uint32_t width = 0;
|
||||
uint32_t plane_pixel_rate;
|
||||
uint_fixed_16_16_t y_tile_minimum;
|
||||
uint32_t y_min_scanlines;
|
||||
@@ -4377,7 +4406,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
|
||||
fb->modifier == I915_FORMAT_MOD_Yf_TILED;
|
||||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
|
||||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
|
||||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
|
||||
x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
|
||||
|
||||
/* Display WA #1141: kbl,cfl */
|
||||
@@ -4390,7 +4421,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
|
||||
if (plane->id == PLANE_CURSOR) {
|
||||
width = intel_pstate->base.crtc_w;
|
||||
height = intel_pstate->base.crtc_h;
|
||||
} else {
|
||||
/*
|
||||
* Src coordinates are already rotated by 270 degrees for
|
||||
@@ -4398,16 +4428,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
* GTT mapping), hence no need to account for rotation here.
|
||||
*/
|
||||
width = drm_rect_width(&intel_pstate->base.src) >> 16;
|
||||
height = drm_rect_height(&intel_pstate->base.src) >> 16;
|
||||
}
|
||||
|
||||
cpp = fb->format->cpp[0];
|
||||
cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
|
||||
fb->format->cpp[0];
|
||||
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
|
||||
|
||||
if (drm_rotation_90_or_270(pstate->rotation)) {
|
||||
int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
|
||||
fb->format->cpp[1] :
|
||||
fb->format->cpp[0];
|
||||
|
||||
switch (cpp) {
|
||||
case 1:
|
||||
@@ -4434,51 +4461,62 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
if (y_tiled) {
|
||||
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
|
||||
y_min_scanlines, 512);
|
||||
plane_blocks_per_line = fixed_16_16_div(interm_pbpl,
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 10)
|
||||
interm_pbpl++;
|
||||
|
||||
plane_blocks_per_line = div_fixed16(interm_pbpl,
|
||||
y_min_scanlines);
|
||||
} else if (x_tiled) {
|
||||
} else if (x_tiled && INTEL_GEN(dev_priv) == 9) {
|
||||
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
|
||||
plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
|
||||
plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
|
||||
} else {
|
||||
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
|
||||
plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
|
||||
plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
|
||||
}
|
||||
|
||||
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
|
||||
method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency);
|
||||
method2 = skl_wm_method2(plane_pixel_rate,
|
||||
cstate->base.adjusted_mode.crtc_htotal,
|
||||
latency,
|
||||
plane_blocks_per_line);
|
||||
|
||||
y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
|
||||
plane_blocks_per_line);
|
||||
y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
|
||||
plane_blocks_per_line);
|
||||
|
||||
if (y_tiled) {
|
||||
selected_result = max_fixed_16_16(method2, y_tile_minimum);
|
||||
selected_result = max_fixed16(method2, y_tile_minimum);
|
||||
} else {
|
||||
uint32_t linetime_us;
|
||||
|
||||
linetime_us = fixed_16_16_to_u32_round_up(
|
||||
linetime_us = fixed16_to_u32_round_up(
|
||||
intel_get_linetime_us(cstate));
|
||||
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
|
||||
(plane_bytes_per_line / 512 < 1))
|
||||
selected_result = method2;
|
||||
else if (ddb_allocation >=
|
||||
fixed_16_16_to_u32_round_up(plane_blocks_per_line))
|
||||
selected_result = min_fixed_16_16(method1, method2);
|
||||
fixed16_to_u32_round_up(plane_blocks_per_line))
|
||||
selected_result = min_fixed16(method1, method2);
|
||||
else if (latency >= linetime_us)
|
||||
selected_result = min_fixed_16_16(method1, method2);
|
||||
selected_result = min_fixed16(method1, method2);
|
||||
else
|
||||
selected_result = method1;
|
||||
}
|
||||
|
||||
res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
|
||||
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
|
||||
res_lines = div_round_up_fixed16(selected_result,
|
||||
plane_blocks_per_line);
|
||||
|
||||
/* Display WA #1125: skl,bxt,kbl,glk */
|
||||
if (level == 0 &&
|
||||
(fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
|
||||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
|
||||
res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
|
||||
|
||||
/* Display WA #1126: skl,bxt,kbl,glk */
|
||||
if (level >= 1 && level <= 7) {
|
||||
if (y_tiled) {
|
||||
res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
|
||||
res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
|
||||
res_lines += y_min_scanlines;
|
||||
} else {
|
||||
res_blocks++;
|
||||
@@ -4563,8 +4601,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
|
||||
if (is_fixed16_zero(linetime_us))
|
||||
return 0;
|
||||
|
||||
linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8,
|
||||
linetime_us));
|
||||
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
|
||||
|
||||
/* Display WA #1135: bxt. */
|
||||
if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
|
||||
@@ -5852,7 +5889,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
|
||||
* the hw runs at the minimal clock before selecting the desired
|
||||
* frequency, if the down threshold expires in that window we will not
|
||||
* receive a down interrupt. */
|
||||
if (IS_GEN9(dev_priv)) {
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
limits = (dev_priv->rps.max_freq_softlimit) << 23;
|
||||
if (val <= dev_priv->rps.min_freq_softlimit)
|
||||
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
|
||||
@@ -5994,7 +6031,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
|
||||
if (val != dev_priv->rps.cur_freq) {
|
||||
gen6_set_rps_thresholds(dev_priv, val);
|
||||
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
I915_WRITE(GEN6_RPNSWREQ,
|
||||
GEN9_FREQUENCY(val));
|
||||
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
@@ -6126,47 +6163,35 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
|
||||
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
|
||||
}
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
spin_lock(&dev_priv->rps.client_lock);
|
||||
while (!list_empty(&dev_priv->rps.clients))
|
||||
list_del_init(dev_priv->rps.clients.next);
|
||||
spin_unlock(&dev_priv->rps.client_lock);
|
||||
}
|
||||
|
||||
void gen6_rps_boost(struct drm_i915_private *dev_priv,
|
||||
struct intel_rps_client *rps,
|
||||
unsigned long submitted)
|
||||
void gen6_rps_boost(struct drm_i915_gem_request *rq,
|
||||
struct intel_rps_client *rps)
|
||||
{
|
||||
struct drm_i915_private *i915 = rq->i915;
|
||||
bool boost;
|
||||
|
||||
/* This is intentionally racy! We peek at the state here, then
|
||||
* validate inside the RPS worker.
|
||||
*/
|
||||
if (!(dev_priv->gt.awake &&
|
||||
dev_priv->rps.enabled &&
|
||||
dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
|
||||
if (!i915->rps.enabled)
|
||||
return;
|
||||
|
||||
/* Force a RPS boost (and don't count it against the client) if
|
||||
* the GPU is severely congested.
|
||||
*/
|
||||
if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
|
||||
rps = NULL;
|
||||
|
||||
spin_lock(&dev_priv->rps.client_lock);
|
||||
if (rps == NULL || list_empty(&rps->link)) {
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->rps.interrupts_enabled) {
|
||||
dev_priv->rps.client_boost = true;
|
||||
schedule_work(&dev_priv->rps.work);
|
||||
}
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
if (rps != NULL) {
|
||||
list_add(&rps->link, &dev_priv->rps.clients);
|
||||
rps->boosts++;
|
||||
} else
|
||||
dev_priv->rps.boosts++;
|
||||
boost = false;
|
||||
spin_lock_irq(&rq->lock);
|
||||
if (!rq->waitboost && !i915_gem_request_completed(rq)) {
|
||||
atomic_inc(&i915->rps.num_waiters);
|
||||
rq->waitboost = true;
|
||||
boost = true;
|
||||
}
|
||||
spin_unlock(&dev_priv->rps.client_lock);
|
||||
spin_unlock_irq(&rq->lock);
|
||||
if (!boost)
|
||||
return;
|
||||
|
||||
if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
|
||||
schedule_work(&i915->rps.work);
|
||||
|
||||
atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
|
||||
}
|
||||
|
||||
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
|
||||
@@ -6365,7 +6390,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
|
||||
|
||||
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
|
||||
IS_GEN9_BC(dev_priv)) {
|
||||
IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
u32 ddcc_status = 0;
|
||||
|
||||
if (sandybridge_pcode_read(dev_priv,
|
||||
@@ -6378,7 +6403,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
|
||||
dev_priv->rps.max_freq);
|
||||
}
|
||||
|
||||
if (IS_GEN9_BC(dev_priv)) {
|
||||
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
/* Store the frequency values in 16.66 MHZ units, which is
|
||||
* the natural hardware unit for SKL
|
||||
*/
|
||||
@@ -6684,7 +6709,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
||||
/* convert DDR frequency from units of 266.6MHz to bandwidth */
|
||||
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
|
||||
|
||||
if (IS_GEN9_BC(dev_priv)) {
|
||||
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
/* Convert GT frequency to 50 HZ units */
|
||||
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
|
||||
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
|
||||
@@ -6702,7 +6727,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
||||
int diff = max_gpu_freq - gpu_freq;
|
||||
unsigned int ia_freq = 0, ring_freq = 0;
|
||||
|
||||
if (IS_GEN9_BC(dev_priv)) {
|
||||
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
|
||||
/*
|
||||
* ring_freq = 2 * GT. ring_freq is in 100MHz units
|
||||
* No floor required for ring frequency on SKL.
|
||||
@@ -7833,7 +7858,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
} else if (INTEL_GEN(dev_priv) >= 9) {
|
||||
gen9_enable_rc6(dev_priv);
|
||||
gen9_enable_rps(dev_priv);
|
||||
if (IS_GEN9_BC(dev_priv))
|
||||
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
|
||||
gen6_update_ring_freq(dev_priv);
|
||||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
gen8_enable_rps(dev_priv);
|
||||
@@ -8848,6 +8873,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
|
||||
case GEN6_PCODE_SUCCESS:
|
||||
return 0;
|
||||
case GEN6_PCODE_UNIMPLEMENTED_CMD:
|
||||
return -ENODEV;
|
||||
case GEN6_PCODE_ILLEGAL_CMD:
|
||||
return -ENXIO;
|
||||
case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
|
||||
@@ -8895,7 +8921,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
|
||||
*/
|
||||
|
||||
if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
|
||||
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
|
||||
DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
|
||||
mbox, __builtin_return_address(0));
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
@@ -8906,7 +8933,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
|
||||
if (__intel_wait_for_register_fw(dev_priv,
|
||||
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
|
||||
500, 0, NULL)) {
|
||||
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
|
||||
DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
|
||||
mbox, __builtin_return_address(0));
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
@@ -8919,8 +8947,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
|
||||
status = gen6_check_mailbox_status(dev_priv);
|
||||
|
||||
if (status) {
|
||||
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
|
||||
status);
|
||||
DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
|
||||
mbox, __builtin_return_address(0), status);
|
||||
return status;
|
||||
}
|
||||
|
||||
@@ -8940,7 +8968,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
|
||||
if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
|
||||
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
|
||||
DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
|
||||
val, mbox, __builtin_return_address(0));
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
@@ -8951,7 +8980,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
|
||||
if (__intel_wait_for_register_fw(dev_priv,
|
||||
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
|
||||
500, 0, NULL)) {
|
||||
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
|
||||
DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
|
||||
val, mbox, __builtin_return_address(0));
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
@@ -8963,8 +8993,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
|
||||
status = gen6_check_mailbox_status(dev_priv);
|
||||
|
||||
if (status) {
|
||||
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
|
||||
status);
|
||||
DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
|
||||
val, mbox, __builtin_return_address(0), status);
|
||||
return status;
|
||||
}
|
||||
|
||||
@@ -9078,7 +9108,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
|
||||
|
||||
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
|
||||
{
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
|
||||
GEN9_FREQ_SCALER);
|
||||
else if (IS_CHERRYVIEW(dev_priv))
|
||||
@@ -9091,7 +9121,7 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
|
||||
|
||||
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
|
||||
{
|
||||
if (IS_GEN9(dev_priv))
|
||||
if (INTEL_GEN(dev_priv) >= 9)
|
||||
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
|
||||
GT_FREQUENCY_MULTIPLIER);
|
||||
else if (IS_CHERRYVIEW(dev_priv))
|
||||
@@ -9113,7 +9143,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
|
||||
struct drm_i915_gem_request *req = boost->req;
|
||||
|
||||
if (!i915_gem_request_completed(req))
|
||||
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
|
||||
gen6_rps_boost(req, NULL);
|
||||
|
||||
i915_gem_request_put(req);
|
||||
kfree(boost);
|
||||
@@ -9142,11 +9172,10 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
|
||||
void intel_pm_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
mutex_init(&dev_priv->rps.hw_lock);
|
||||
spin_lock_init(&dev_priv->rps.client_lock);
|
||||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
|
||||
__intel_autoenable_gt_powersave);
|
||||
INIT_LIST_HEAD(&dev_priv->rps.clients);
|
||||
atomic_set(&dev_priv->rps.num_waiters, 0);
|
||||
|
||||
dev_priv->pm.suspended = false;
|
||||
atomic_set(&dev_priv->pm.wakeref_count, 0);
|
||||
|
@@ -315,6 +315,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
|
||||
else
|
||||
val |= EDP_PSR_TP1_TP2_SEL;
|
||||
|
||||
val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
|
||||
I915_WRITE(EDP_PSR_CTL, val);
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,7 @@
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Generated by: intel-gpu-tools-1.8-220-g01153e7
|
||||
* Generated by: intel-gpu-tools-1.19-177-g68e2eab2
|
||||
*/
|
||||
|
||||
#include "intel_renderstate.h"
|
||||
@@ -873,7 +873,7 @@ static const u32 gen9_null_state_batch[] = {
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78550003,
|
||||
0x00000000,
|
||||
0x0000000f,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
Some files were not shown because too many files have changed in this diff.